positive: string, lengths 100 to 30.3k
anchor: string, lengths 1 to 15k
def write_pa11y_results(item, pa11y_results, data_dir): """ Write the output from pa11y into a data file. """ data = dict(item) data['pa11y'] = pa11y_results # it would be nice to use the URL as the filename, # but that gets complicated (long URLs, special characters, etc) # so we'll make the filename a hash of the URL instead, # and throw in the access time so that we can store the same URL # multiple times in this data directory hasher = hashlib.md5() hasher.update(item["url"].encode('utf8')) hasher.update(item["accessed_at"].isoformat().encode('utf8')) basename = hasher.hexdigest() filename = basename + ".json" filepath = data_dir / filename data_dir.makedirs_p() text = json.dumps(data, cls=DateTimeEncoder) filepath.write_text(text)
Write the output from pa11y into a data file.
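A minimal standalone sketch of the filename scheme used above, assuming only that the item dict carries "url" and "accessed_at" keys as the function reads them; the sample values are hypothetical:

import hashlib
from datetime import datetime

# Hypothetical item mirroring the keys the function above consumes.
item = {"url": "https://example.com/page", "accessed_at": datetime(2020, 1, 1, 12, 0)}

# Same scheme as above: hash URL plus access time, so the same URL can be
# stored several times in one data directory.
hasher = hashlib.md5()
hasher.update(item["url"].encode("utf8"))
hasher.update(item["accessed_at"].isoformat().encode("utf8"))
print(hasher.hexdigest() + ".json")  # 32 hex chars + ".json"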
def _replace_none(self, aDict): """ Replace all None values in a dict with 'none' """ for k, v in aDict.items(): if v is None: aDict[k] = 'none'
Replace all None values in a dict with 'none'
def CrearAjusteBase(self, pto_emision=1, nro_orden=None, # unified, contract, paper coe_ajustado=None, # unified nro_contrato=None, # contract tipo_formulario=None, # paper nro_formulario=None, # paper actividad=None, # contract / paper cod_grano=None, # contract / paper cuit_vendedor=None, # contract / paper cuit_comprador=None, # contract / paper cuit_corredor=None, # contract / paper nro_ing_bruto_vendedor=None, # paper nro_ing_bruto_comprador=None, # paper nro_ing_bruto_corredor=None, # paper tipo_operacion=None, # paper precio_ref_tn=None, # contract cod_grado_ent=None, # contract val_grado_ent=None, # contract precio_flete_tn=None, # contract cod_puerto=None, # contract des_puerto_localidad=None, # contract cod_provincia=None, # unified, contract, paper cod_localidad=None, # unified, contract, paper comision_corredor=None, # paper **kwargs ): "Internally initializes the data of a settlement (liquidación) to be adjusted" # rename fields for backwards compatibility (header): if 'cod_localidad_procedencia' in kwargs: cod_localidad = kwargs['cod_localidad_procedencia'] if 'cod_provincia_procedencia' in kwargs: cod_provincia = kwargs['cod_provincia_procedencia'] if 'nro_act_comprador' in kwargs: actividad = kwargs['nro_act_comprador'] if 'cod_tipo_operacion' in kwargs: tipo_operacion = kwargs['cod_tipo_operacion'] # clean up special fields (per AFIP validations) if val_grado_ent == 0: val_grado_ent = None # delete data that does not apply if cuit_corredor and int(cuit_corredor) == 0: cuit_corredor = None comision_corredor = None nro_ing_bruto_corredor = None if cod_puerto and int(cod_puerto) != 14: des_puerto_localidad = None # validation 1630 # clear optional fields so they are not sent when not applicable: if cod_grado_ent == "": cod_grado_ent = None if val_grado_ent == 0: val_grado_ent = None # build the dictionary with the general fields of the base adjustment: self.ajuste = { 'ajusteBase': { 'ptoEmision': pto_emision, 'nroOrden': nro_orden, 'coeAjustado': coe_ajustado, 'nroContrato': nro_contrato, 'tipoFormulario': tipo_formulario, 'nroFormulario': nro_formulario, 'actividad': actividad, 'codGrano': cod_grano, 'cuitVendedor': cuit_vendedor, 'cuitComprador': cuit_comprador, 'cuitCorredor': cuit_corredor, 'nroIngBrutoVendedor': nro_ing_bruto_vendedor, 'nroIngBrutoComprador': nro_ing_bruto_comprador, 'nroIngBrutoCorredor': nro_ing_bruto_corredor, 'tipoOperacion': tipo_operacion, 'codPuerto': cod_puerto, 'desPuertoLocalidad': des_puerto_localidad, 'comisionCorredor': comision_corredor, 'precioRefTn': precio_ref_tn, 'codGradoEnt': cod_grado_ent, 'valGradoEnt': val_grado_ent, 'precioFleteTn': precio_flete_tn, 'codLocalidad': cod_localidad, 'codProv': cod_provincia, 'certificados': [], } } # for compatibility with AgregarCertificado self.liquidacion = self.ajuste['ajusteBase'] # initialize temporaries self.__ajuste_base = None self.__ajuste_debito = None self.__ajuste_credito = None return True
Internally initializes the data of a settlement (liquidación) to be adjusted
def set_timestamp(cls, filename: str, response: HTTPResponse): '''Set the Last-Modified timestamp onto the given file. Args: filename: The path of the file response: Response ''' last_modified = response.fields.get('Last-Modified') if not last_modified: return last_modified = email.utils.parsedate(last_modified) if last_modified is None: # parsedate returns None (rather than raising) on bad input _logger.error('Failed to parse date.') return last_modified = time.mktime(last_modified) os.utime(filename, (time.time(), last_modified))
Set the Last-Modified timestamp onto the given file. Args: filename: The path of the file response: Response
def _Open(self, path_spec=None, mode='rb'): """Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid. """ if not path_spec: raise ValueError('Missing path specification.') volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec) if volume_index is None: raise errors.PathSpecError( 'Unable to retrieve volume index from path specification.') self._file_system = resolver.Resolver.OpenFileSystem( path_spec, resolver_context=self._resolver_context) vslvm_volume_group = self._file_system.GetLVMVolumeGroup() if (volume_index < 0 or volume_index >= vslvm_volume_group.number_of_logical_volumes): raise errors.PathSpecError(( 'Unable to retrieve LVM logical volume index: {0:d} from path ' 'specification.').format(volume_index)) self._vslvm_logical_volume = vslvm_volume_group.get_logical_volume( volume_index)
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. mode (Optional[str]): file access mode. Raises: AccessError: if the access to open the file was denied. IOError: if the file-like object could not be opened. OSError: if the file-like object could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
def unmarshal(self, value, bind_client=None): """ Cast the specified value to the entity type. """ #self.log.debug("Unmarshall {0!r}: {1!r}".format(self, value)) if not isinstance(value, self.type): o = self.type() if bind_client is not None and hasattr(o.__class__, 'bind_client'): o.bind_client = bind_client if isinstance(value, dict): for (k, v) in value.items(): if not hasattr(o.__class__, k): self.log.warning("Unable to set attribute {0} on entity {1!r}".format(k, o)) else: #self.log.debug("Setting attribute {0} on entity {1!r}".format(k, o)) setattr(o, k, v) value = o else: raise Exception("Unable to unmarshall object {0!r}".format(value)) return value
Cast the specified value to the entity type.
def parse_record(raw_record, is_training, dtype): """Parses a record containing a training example of an image. The input record is parsed into a label and image, and the image is passed through preprocessing steps (cropping, flipping, and so on). Args: raw_record: scalar Tensor tf.string containing a serialized Example protocol buffer. is_training: A boolean denoting whether the input is for training. dtype: data type to use for images/features. Returns: Tuple with processed image tensor and one-hot-encoded label tensor. """ image_buffer, label = _parse_example_proto(raw_record) image = imagenet_preprocessing.preprocess_image( image_buffer=image_buffer, output_height=_DEFAULT_IMAGE_SIZE, output_width=_DEFAULT_IMAGE_SIZE, num_channels=_NUM_CHANNELS, is_training=is_training) image = tf.cast(image, dtype) return image, label
Parses a record containing a training example of an image. The input record is parsed into a label and image, and the image is passed through preprocessing steps (cropping, flipping, and so on). Args: raw_record: scalar Tensor tf.string containing a serialized Example protocol buffer. is_training: A boolean denoting whether the input is for training. dtype: data type to use for images/features. Returns: Tuple with processed image tensor and one-hot-encoded label tensor.
def slistStr(slist): """ Converts signed list to angle string. """ slist = _fixSlist(slist) string = ':'.join(['%02d' % x for x in slist[1:]]) return slist[0] + string
Converts signed list to angle string.
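A hedged illustration of the join performed above, assuming the signed list has the shape [sign_char, deg, min, sec]; the private _fixSlist normalization is skipped here, so this only mirrors the formatting step:

slist = ['+', 12, 30, 0]  # assumed shape: sign character, then numeric parts
print(slist[0] + ':'.join(['%02d' % x for x in slist[1:]]))  # '+12:30:00'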
def _add_conversation(self, conversation): """ Add the conversation and fire the :meth:`on_conversation_added` event. :param conversation: The conversation object to add. :type conversation: :class:`~.AbstractConversation` The conversation is added to the internal list of conversations which can be queried at :attr:`conversations`. The :meth:`on_conversation_added` event is fired. In addition, the :class:`ConversationService` subscribes to the :meth:`~.AbstractConversation.on_exit` event to remove the conversation from the list automatically. There is no need to remove a conversation from the list explicitly. """ handler = functools.partial( self._handle_conversation_exit, conversation ) tokens = [] def linked_token(signal, handler): return signal, signal.connect(handler) tokens.append(linked_token(conversation.on_exit, handler)) tokens.append(linked_token(conversation.on_failure, handler)) tokens.append(linked_token(conversation.on_message, functools.partial( self.on_message, conversation, ))) self._conversation_meta[conversation] = ( tokens, ) self._conversation_map[conversation.jid] = conversation self.on_conversation_added(conversation)
Add the conversation and fire the :meth:`on_conversation_added` event. :param conversation: The conversation object to add. :type conversation: :class:`~.AbstractConversation` The conversation is added to the internal list of conversations which can be queried at :attr:`conversations`. The :meth:`on_conversation_added` event is fired. In addition, the :class:`ConversationService` subscribes to the :meth:`~.AbstractConversation.on_exit` event to remove the conversation from the list automatically. There is no need to remove a conversation from the list explicitly.
def StringDecoder(field_number, is_repeated, is_packed, key, new_default): """Returns a decoder for a string field.""" local_DecodeVarint = _DecodeVarint local_unicode = six.text_type def _ConvertToUnicode(byte_str): try: return local_unicode(byte_str, 'utf-8') except UnicodeDecodeError as e: # add more information to the error message and re-raise it. e.reason = '%s in field: %s' % (e, key.full_name) raise assert not is_packed if is_repeated: tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED) tag_len = len(tag_bytes) def DecodeRepeatedField(buffer, pos, end, message, field_dict): value = field_dict.get(key) if value is None: value = field_dict.setdefault(key, new_default(message)) while 1: (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated string.') value.append(_ConvertToUnicode(buffer[pos:new_pos])) # Predict that the next tag is another copy of the same repeated field. pos = new_pos + tag_len if buffer[new_pos:pos] != tag_bytes or new_pos == end: # Prediction failed. Return. return new_pos return DecodeRepeatedField else: def DecodeField(buffer, pos, end, message, field_dict): (size, pos) = local_DecodeVarint(buffer, pos) new_pos = pos + size if new_pos > end: raise _DecodeError('Truncated string.') field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos]) return new_pos return DecodeField
Returns a decoder for a string field.
def validate(element, reference=None, report_file=None): """ Checks if the :class:`Element <hl7apy.core.Element>` is a valid HL7 message according to the reference specified. If the reference is not specified, the official HL7 structures for the elements will be used. In particular it checks: * the maximum and minimum number of occurrences for every child * that children are all allowed * the datatype of fields, components and subcomponents * the values, in particular the length and the adherence with the HL7 table, if one is specified It raises the first exception that it finds. If :attr:`report_file` is specified, it will create a file with all the errors that occur. :param element: :class:`Element <hl7apy.core.Element>`: The element to validate :param reference: the reference to use. Usually it is None or a message profile object :param report_file: the name of the report file to create :return: True if everything is ok :raises: :exc:`ValidationError <hl7apy.exceptions.ValidationError>`: when errors occur :raises: :exc:`ValidationWarning <hl7apy.exceptions.ValidationWarning>`: errors concerning the values """ from hl7apy.core import is_base_datatype def _check_z_element(el, errs, warns): if el.classname == 'Field': if is_base_datatype(el.datatype, el.version) or \ el.datatype == 'varies': return True elif el.datatype is not None: # if the datatype is a complex datatype, the z element must follow the correct # structure of that datatype # Component just to search in the datatypes.... dt_struct = load_reference(el.datatype, 'Datatypes_Structs', el.version) ref = ('sequence', dt_struct, el.datatype, None, None, -1) _check_known_element(el, ref, errs, warns) for c in el.children: _is_valid(c, None, errs, warns) return True def _check_repetitions(el, children, cardinality, child_name, errs): children_num = len(children) min_repetitions, max_repetitions = cardinality if max_repetitions != -1: if children_num < min_repetitions: errs.append(ValidationError("Missing required child {}.{}".format(el.name, child_name))) elif children_num > max_repetitions: errs.append(ValidationError("Child limit exceeded {}.{}".format(child_name, el.name))) else: if children_num < min_repetitions: errs.append(ValidationError("Missing required child {}.{}".format(el.name, child_name))) def _check_table_compliance(el, ref, warns): table = ref[4] if table is not None: try: table_ref = load_reference(table, 'Table', el.version) except ChildNotFound: pass else: table_children = table_ref[1] if el.to_er7() not in table_children: warns.append(ValidationWarning("Value {} not in table {} in element {}.{}". format(el.to_er7(), table, el.parent.name, el.name))) def _check_length(el, ref, warns): max_length = ref[5] if -1 < max_length < len(el.to_er7()): warns.append(ValidationWarning("Exceeded max length ({}) of {}.{}". format(max_length, el.parent.name, el.name))) def _check_datatype(el, ref, errs): ref_datatype = ref[2] if el.datatype != ref_datatype: errs.append(ValidationError("Datatype {} is not correct for {}.{} (it must be {})".format(el.datatype, el.parent.name, el.name, ref[1]))) def _get_valid_children_info(ref): valid_children = {c[0] for c in ref[1]} children_refs = ref[1] return valid_children, children_refs def _get_child_reference_info(ref): child_name, cardinality = ref[0], ref[2] return child_name, cardinality def _check_known_element(el, ref, errs, warns): if ref is None: try: ref = load_reference(el.name, el.classname, el.version) except ChildNotFound: errs.append(ValidationError("Invalid element found: {}".format(el))) if ref[0] in ('sequence', 'choice'): element_children = {c.name for c in el.children if not c.is_z_element()} valid_children, valid_children_refs = _get_valid_children_info(ref) # check that the children are all allowed children if not element_children <= valid_children: errs.append(ValidationError("Invalid children detected for {}: {}". format(el, list(element_children - valid_children)))) # iterates the valid children for child_ref in valid_children_refs: # it gets the structure of the children to check child_name, cardinality = _get_child_reference_info(child_ref) try: # it gets all the occurrences of the children of a type children = el.children.get(child_name) except Exception: # TODO: it is due to the lack of element in the official reference files... should # we raise an exception here? pass else: _check_repetitions(el, children, cardinality, child_name, errs) # calls validation for every child for c in children: _is_valid(c, child_ref[1], errs, warns) # finally calls validation for z_elements z_children = [c for c in el.children if c.is_z_element()] for c in z_children: _is_valid(c, None, errs, warns) else: _check_table_compliance(el, ref, warns) _check_length(el, ref, warns) if el.datatype == 'varies': # TODO: it should check the real rule return True _check_datatype(el, ref, errs) # For complex datatype elements, the reference is the one of the datatype if not is_base_datatype(el.datatype, el.version): # Component just to search in the datatypes.... ref = load_reference(el.datatype, 'Datatypes_Structs', el.version) _is_valid(el, ref, errs, warns) def _is_valid(el, ref, errs, warns): if el.is_unknown(): errs.append(ValidationError("Unknown element found: {}.{}".format(el.parent, el))) return if el.is_z_element(): return _check_z_element(el, errs, warns) return _check_known_element(el, ref, errs, warns) errors = [] warnings = [] _is_valid(element, reference, errors, warnings) if report_file is not None: with open(report_file, "w") as f: for e in errors: f.write("Error: {}\n".format(e)) for w in warnings: f.write("Warning: {}\n".format(w)) if errors: raise errors[0] return True
Checks if the :class:`Element <hl7apy.core.Element>` is a valid HL7 message according to the reference specified. If the reference is not specified, the official HL7 structures for the elements will be used. In particular it checks: * the maximum and minimum number of occurrences for every child * that children are all allowed * the datatype of fields, components and subcomponents * the values, in particular the length and the adherence with the HL7 table, if one is specified It raises the first exception that it finds. If :attr:`report_file` is specified, it will create a file with all the errors that occur. :param element: :class:`Element <hl7apy.core.Element>`: The element to validate :param reference: the reference to use. Usually it is None or a message profile object :param report_file: the name of the report file to create :return: True if everything is ok :raises: :exc:`ValidationError <hl7apy.exceptions.ValidationError>`: when errors occur :raises: :exc:`ValidationWarning <hl7apy.exceptions.ValidationWarning>`: errors concerning the values
def worksheets(self): """Returns a list of all :class:`worksheets <gspread.models.Worksheet>` in a spreadsheet. """ sheet_data = self.fetch_sheet_metadata() return [Worksheet(self, x['properties']) for x in sheet_data['sheets']]
Returns a list of all :class:`worksheets <gspread.models.Worksheet>` in a spreadsheet.
def _computeStatus(self, dfile, service): """Computes the status for a file. If more than one service handles the file and their statuses differ, returns 'C' (for complicated); otherwise returns the status shared by all services.""" # If only one service requested if service: if service not in dfile['services']: return self.ST_UNTRACKED else: return dfile['services'][service]['status'] # Otherwise go through all services and compute a sensible status first_service_key = next(iter(dfile['services'])) # Save off one of the statuses so we can compute if they are all the same between services. first_status = dfile['services'][first_service_key]['status'] # Return ST_COMPLICATED "C" if status differs for service in dfile['services']: if dfile['services'][service]['status'] != first_status: return self.ST_COMPLICATED return first_status
Computes the status for a file. If more than one service handles the file and their statuses differ, returns 'C' (for complicated); otherwise returns the status shared by all services.
def validate(self): """Validate / fix up the current config""" if not self.get('api_key'): raise ValueError("api_key not found in config. Please see documentation.") host = self.get('host') or DEFAULT_CLOUD_HOST if host: # remove extraneous slashes and force to byte string # otherwise msg += message_body in httplib will fail in python2 # when message_body contains binary data, and url is unicode # remaining failure modes include at least: # passing bytes in python3 will fail as we try to strip unicode '/' characters # passing unicode code points in python2 will fail due to httplib host.encode('ascii') host = host.strip('/') if not isinstance(host, str): host = host.encode('utf-8') self['host'] = host self.setdefault('autostart_notification_thread', True)
Validate / fix up the current config
def is_Union(tp): """Python version independent check if a type is typing.Union. Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1. """ if tp is Union: return True try: # Python 3.6 return tp.__origin__ is Union except AttributeError: try: return isinstance(tp, typing.UnionMeta) except AttributeError: return False
Python version independent check if a type is typing.Union. Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1.
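A quick self-contained check of the branch logic above on a modern CPython, where Union[int, str].__origin__ is Union; the function is restated verbatim so the prints run standalone:

import typing
from typing import Union, Optional

def is_Union(tp):
    if tp is Union:
        return True
    try:  # Python 3.6+ exposes __origin__ on parameterized unions
        return tp.__origin__ is Union
    except AttributeError:
        try:
            return isinstance(tp, typing.UnionMeta)
        except AttributeError:
            return False

print(is_Union(Union[int, str]))  # True
print(is_Union(Optional[int]))    # True: Optional[int] is Union[int, None]
print(is_Union(int))              # False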
def load(self, config_template, config_file=None): """Read the config file if it exists, else read the default config. Creates the user config file if it doesn't exist using the template. :type config_template: str :param config_template: The config template file name. :type config_file: str :param config_file: (Optional) The config file name. If None, the config_file name will be set to the config_template. :rtype: :class:`configobj.ConfigObj` :return: The config information for reading and writing. """ if config_file is None: config_file = config_template config_path = build_config_file_path(config_file) template_path = os.path.join(os.path.dirname(__file__), config_template) self._copy_template_to_config(template_path, config_path) return self._load_template_or_config(template_path, config_path)
Read the config file if it exists, else read the default config. Creates the user config file if it doesn't exist using the template. :type config_template: str :param config_template: The config template file name. :type config_file: str :param config_file: (Optional) The config file name. If None, the config_file name will be set to the config_template. :rtype: :class:`configobj.ConfigObj` :return: The config information for reading and writing.
def gps_velocity_old(GPS_RAW_INT): '''return GPS velocity vector''' return Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)), GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)), 0)
return GPS velocity vector
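A standalone check of the unit handling above; in MAVLink's GPS_RAW_INT, vel is in cm/s and cog in centidegrees, which is why both are scaled by 0.01. Vector3 and the message are stand-ins here, not the library's own types:

from math import cos, sin, radians
from collections import namedtuple

Vector3 = namedtuple("Vector3", "x y z")   # stand-in for the real Vector3
GpsRawInt = namedtuple("GpsRawInt", "vel cog")
msg = GpsRawInt(vel=500, cog=9000)         # 5 m/s, course 90.00 degrees

v = Vector3(msg.vel * 0.01 * cos(radians(msg.cog * 0.01)),
            msg.vel * 0.01 * sin(radians(msg.cog * 0.01)),
            0)
print(v)  # x close to 0 (north component), y = 5.0 (east component)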
def from_mat_file(cls, matfilename): """Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance """ M = scipy.io.loadmat(matfilename) instance = cls() instance.gyro_data = M['gyro'] instance.timestamps = M['timestamps'] return instance
Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance
def subnode(self, node): """Make `node` receiver's child.""" self.children.append(node) node.parent = self node.adjust_interleave(node.interleave)
Make `node` receiver's child.
def insert(self): """persist the field values of this orm""" ret = True schema = self.schema fields = self.depopulate(False) q = self.query q.set_fields(fields) pk = q.insert() if pk: fields = q.fields fields[schema.pk.name] = pk self._populate(fields) else: ret = False return ret
persist the field values of this orm
def takeChild(self, index): """ Removes the child at the given index from this item. :param index | <int> """ item = super(XGanttWidgetItem, self).takeChild(index) if item: item.removeFromScene() return item
Removes the child at the given index from this item. :param index | <int>
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None): """Subset a BED file to only have items from the specified chromosome. """ if out_dir is None: out_dir = os.path.dirname(in_file) base, ext = os.path.splitext(os.path.basename(in_file)) out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: _rewrite_bed_with_chrom(in_file, tx_out_file, chrom) return out_file
Subset a BED file to only have items from the specified chromosome.
def format(self, member_info: bool = False): """ :param member_info: If True, also adds chat member info. Note that this additional info requires ONE extra API call. """ user = self.api_object self.__format_user(user) if member_info and self.chat.type != CHAT_TYPE_PRIVATE: self._add_empty() self.__format_member(user)
:param member_info: If True, also adds chat member info. Note that this additional info requires ONE extra API call.
def namespace_uri(self): """ Finds and returns the first applied URI of this node that has a namespace. :return str: uri """ try: return next( iter(filter(lambda uri: URI(uri).namespace, self._uri)) ) except StopIteration: return None
Finds and returns the first applied URI of this node that has a namespace. :return str: uri
def exhaust_stream(f): """Helper decorator for methods that exhaust the stream on return.""" def wrapper(self, stream, *args, **kwargs): try: return f(self, stream, *args, **kwargs) finally: exhaust = getattr(stream, "exhaust", None) if exhaust is not None: exhaust() else: while 1: chunk = stream.read(1024 * 64) if not chunk: break return update_wrapper(wrapper, f)
Helper decorator for methods that exhaust the stream on return.
def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect()
Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c']
def _search_type_in_type_comment(self, code): """ For more info see: https://www.python.org/dev/peps/pep-0484/#type-comments >>> AssignmentProvider()._search_type_in_type_comment('type: int') ['int'] """ for p in self.PEP0484_TYPE_COMMENT_PATTERNS: match = p.search(code) if match: return [match.group(1)]
For more info see: https://www.python.org/dev/peps/pep-0484/#type-comments >>> AssignmentProvider()._search_type_in_type_comment('type: int') ['int']
def jacobi(a, b): '''Calculates the value of the Jacobi symbol (a/b) where both a and b are positive integers, and b is odd :returns: -1, 0 or 1 ''' assert a > 0 assert b > 0 if a == 0: return 0 result = 1 while a > 1: if a & 1: if ((a-1)*(b-1) >> 2) & 1: result = -result a, b = b % a, a else: if (((b * b) - 1) >> 3) & 1: result = -result a >>= 1 if a == 0: return 0 return result
Calculates the value of the Jacobi symbol (a/b) where both a and b are positive integers, and b is odd :returns: -1, 0 or 1
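Spot checks for the routine above against small known values of the Jacobi symbol ((1/3) = 1, (2/3) = -1, (3/9) = 0); the function is restated unflattened so the checks run standalone:

def jacobi(a, b):
    # Jacobi symbol (a/b) for positive a and odd positive b, as above.
    assert a > 0
    assert b > 0
    if a == 0:
        return 0
    result = 1
    while a > 1:
        if a & 1:  # a odd: quadratic reciprocity sign flip, then reduce
            if ((a - 1) * (b - 1) >> 2) & 1:
                result = -result
            a, b = b % a, a
        else:  # a even: factor out a 2 using the (2/b) rule
            if (((b * b) - 1) >> 3) & 1:
                result = -result
            a >>= 1
    if a == 0:
        return 0
    return result

print(jacobi(1, 3), jacobi(2, 3), jacobi(3, 9))  # 1 -1 0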
def validipaddr(address): """ Returns True if `address` is a valid IPv4 address. >>> validipaddr('192.168.1.1') True >>> validipaddr('192.168.1.800') False >>> validipaddr('192.168.1') False """ try: octets = address.split('.') if len(octets) != 4: return False for x in octets: if not (0 <= int(x) <= 255): return False except ValueError: return False return True
Returns True if `address` is a valid IPv4 address. >>> validipaddr('192.168.1.1') True >>> validipaddr('192.168.1.800') False >>> validipaddr('192.168.1') False
def remove_triple( self, subj: URIRef, pred: URIRef, obj: Union[URIRef, Literal] ) -> None: """ Removes a triple from the rdflib Graph. You must input the triple with each node in its URIRef or Literal form exactly the way it was input, or the triple will not be deleted. Args: subj: Entity subject to be removed if it is the only node with this subject; otherwise this just deletes one description (i.e. a predicate-object pair) of the entity. pred: Entity predicate to be removed obj: Entity object to be removed """ self.g.remove( (subj, pred, obj) )
Removes a triple from the rdflib Graph. You must input the triple with each node in its URIRef or Literal form exactly the way it was input, or the triple will not be deleted. Args: subj: Entity subject to be removed if it is the only node with this subject; otherwise this just deletes one description (i.e. a predicate-object pair) of the entity. pred: Entity predicate to be removed obj: Entity object to be removed
def indent(self, text, n_indents=1, skipping=False): """ Indent text with single spaces. Parameters ---------- :param text : string The text which gets a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text. """ lines = text.splitlines() space = self.TEMPLATES.get(self.target_language).get('indent', ' ') # Single line: if len(lines) == 1: if skipping: return text.strip() return n_indents * space + text.strip() # Multiple lines: indented_lines = [] for idx, line in enumerate(lines): if skipping and idx == 0: indented_lines.append(line) else: line = n_indents * space + line indented_lines.append(line) indented_text = '\n'.join(indented_lines) return indented_text
Indent text with single spaces. Parameters ---------- :param text : string The text which gets a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text.
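A hedged usage sketch of the indentation rules above; the method depends on class state (TEMPLATES, target_language), so a plain function with a fixed four-space indent imitates the per-line logic:

def indent(text, n_indents=1, skipping=False, space='    '):
    # Same rules as the method above, minus the template lookup.
    lines = text.splitlines()
    if len(lines) == 1:
        return text.strip() if skipping else n_indents * space + text.strip()
    out = []
    for idx, line in enumerate(lines):
        out.append(line if (skipping and idx == 0) else n_indents * space + line)
    return '\n'.join(out)

print(indent('if x:\n    pass'))
# prints:
#     if x:
#         pass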
def display(self, image): """ Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image` """ assert(image.mode == self.mode) assert(image.size == self.size) image = self.preprocess(image) self.command( # Column start/end address self._const.COLUMNADDR, self._colstart, self._colend - 1, # Page start/end address self._const.PAGEADDR, 0x00, self._pages - 1) buf = bytearray(self._w * self._pages) off = self._offsets mask = self._mask idx = 0 for pix in image.getdata(): if pix > 0: buf[off[idx]] |= mask[idx] idx += 1 self.data(list(buf))
Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image`
def load_lexer_from_file(filename, lexername="CustomLexer", **options): """Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be named CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2 """ try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} exec(open(filename, 'rb').read(), custom_namespace) # Retrieve the class `lexername` from that namespace if lexername not in custom_namespace: raise ClassNotFound('no valid %s class found in %s' % (lexername, filename)) lexer_class = custom_namespace[lexername] # And finally instantiate it with the options return lexer_class(**options) except IOError as err: raise ClassNotFound('cannot read %s' % filename) except ClassNotFound as err: raise except Exception as err: raise ClassNotFound('error when loading custom lexer: %s' % err)
Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be named CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2
def short_form_one_format(jupytext_format): """Represent one jupytext format as a string""" if not isinstance(jupytext_format, dict): return jupytext_format fmt = jupytext_format['extension'] if 'suffix' in jupytext_format: fmt = jupytext_format['suffix'] + fmt elif fmt.startswith('.'): fmt = fmt[1:] if 'prefix' in jupytext_format: fmt = jupytext_format['prefix'] + '/' + fmt if jupytext_format.get('format_name'): if jupytext_format['extension'] not in ['.md', '.Rmd'] or jupytext_format['format_name'] == 'pandoc': fmt = fmt + ':' + jupytext_format['format_name'] return fmt
Represent one jupytext format as a string
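Two illustrative inputs for the shortener above, restated so the prints run standalone; the dicts mimic jupytext-style format descriptions:

def short_form_one_format(jupytext_format):
    if not isinstance(jupytext_format, dict):
        return jupytext_format
    fmt = jupytext_format['extension']
    if 'suffix' in jupytext_format:
        fmt = jupytext_format['suffix'] + fmt
    elif fmt.startswith('.'):
        fmt = fmt[1:]
    if 'prefix' in jupytext_format:
        fmt = jupytext_format['prefix'] + '/' + fmt
    if jupytext_format.get('format_name'):
        if jupytext_format['extension'] not in ['.md', '.Rmd'] or jupytext_format['format_name'] == 'pandoc':
            fmt = fmt + ':' + jupytext_format['format_name']
    return fmt

print(short_form_one_format({'extension': '.py', 'format_name': 'percent'}))  # py:percent
print(short_form_one_format({'extension': '.ipynb', 'prefix': 'notebooks'}))  # notebooks/ipynb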
def trigger_keyphrases( text = None, # input text to parse keyphrases = None, # keyphrases for parsing input text response = None, # optional text response on trigger function = None, # optional function on trigger kwargs = None, # optional function keyword arguments confirm = False, # optional return of confirmation confirmation_prompt = "Do you want to continue? (y/n)", confirmation_feedback_confirm = "confirm", confirmation_feedback_deny = "deny" ): """ Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments. """ if any(pattern in text for pattern in keyphrases): if confirm: return confirmation( prompt = confirmation_prompt, feedback_confirm = confirmation_feedback_confirm, feedback_deny = confirmation_feedback_deny, function = function, kwargs = kwargs ) if function and not kwargs: result = function() elif function and kwargs: result = function(**kwargs) else: result = None if response: return response elif not response and result: return str(result) else: return True else: return False
Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments.
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11): """ Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13 """ return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13
def lookups(self, request, model_admin): """ Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. """ return [(q.slug, q.title) for q in Question.objects.all()]
Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.
def get_handler_fp(logger): """ Get handler_fp. This method will be integrated into the LoggerFactory object in the future. :param logging.Logger logger: Python logging.Logger. logger instance. :rtype: logging.Logger.handlers.BaseRotatingHandler :return: Handler or Handler's stream. We call it `handler_fp`. """ if not hasattr(logger, 'handlers'): raise blackbird.utils.error.BlackbirdError( 'Given logger is not logging.Logger instance!' ) if len(logger.handlers) != 1: raise blackbird.utils.error.BlackbirdError( 'Given logger has invalid handlers.' ) if hasattr(logger.handlers[0], 'stream'): return logger.handlers[0].stream # case of setting SysLogHandler to logger.handlers[0] return logger.handlers[0]
Get handler_fp. This method will be integrated into the LoggerFactory object in the future. :param logging.Logger logger: Python logging.Logger. logger instance. :rtype: logging.Logger.handlers.BaseRotatingHandler :return: Handler or Handler's stream. We call it `handler_fp`.
def fromstring(cls, s, *args, **kwargs): """ Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in []. """ s = s.replace("\(", "&lparen;") s = s.replace("\)", "&rparen;") s = s.replace("\[", "&lbrack;") s = s.replace("\]", "&rbrack;") s = s.replace("\{", "&lcurly;") s = s.replace("\}", "&rcurly;") p = [] i = 0 for m in re.finditer(r"\[.*?\]|\(.*?\)", s): # Spaces in a range encapsulated in square brackets are encoded. # "[Windows Vista]" is one range, don't split on space. p.append(s[i:m.start()]) p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end() p.append(s[i:]) s = "".join(p) s = s.replace("][", "] [") s = s.replace(")(", ") (") s = s.replace("\|", "&vdash;") s = re.sub(r"\s+\|\s+", "|", s) s = re.sub(r"\s+", " ", s) s = re.sub(r"\{\s+", "{", s) s = re.sub(r"\s+\}", "}", s) s = s.split(" ") s = [v.replace("&space;"," ") for v in s] P = cls([], *args, **kwargs) G, O, i = [], [], 0 for s in s: constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY)) constraint.index = len(P.sequence) P.sequence.append(constraint) # Push a new group on the stack if string starts with "{". # Parse constraint from string, add it to all open groups. # Pop latest group from stack if string ends with "}". # Insert groups in opened-first order (i). while s.startswith("{"): s = s[1:] G.append((i, [])); i+=1 O.append([]) for g in G: g[1].append(constraint) while s.endswith("}"): s = s[:-1] if G: O[G[-1][0]] = G[-1][1]; G.pop() P.groups = [g for g in O if g] return P
Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in [].
def present(self, name, *args): """Require that an owner name (and optionally an rdata type, or specific rdataset) exists as a prerequisite to the execution of the update. The first argument is always a name. The other arguments can be: - rdataset... - rdata... - rdtype, string...""" if isinstance(name, (str, unicode)): name = dns.name.from_text(name, None) if len(args) == 0: rrset = self.find_rrset(self.answer, name, dns.rdataclass.ANY, dns.rdatatype.ANY, dns.rdatatype.NONE, None, True, True) elif isinstance(args[0], dns.rdataset.Rdataset) or \ isinstance(args[0], dns.rdata.Rdata) or \ len(args) > 1: if not isinstance(args[0], dns.rdataset.Rdataset): # Add a 0 TTL args = list(args) args.insert(0, 0) self._add(False, self.answer, name, *args) else: rdtype = args[0] if isinstance(rdtype, (str, unicode)): rdtype = dns.rdatatype.from_text(rdtype) rrset = self.find_rrset(self.answer, name, dns.rdataclass.ANY, rdtype, dns.rdatatype.NONE, None, True, True)
Require that an owner name (and optionally an rdata type, or specific rdataset) exists as a prerequisite to the execution of the update. The first argument is always a name. The other arguments can be: - rdataset... - rdata... - rdtype, string...
def complete(self, match, subject_graph): """Check the completeness of the ring match""" if not CustomPattern.complete(self, match, subject_graph): return False if self.strong: # the extra tests below only apply when a strong ring is required if self.size % 2 == 0: # even ring for i in range(self.size//2): vertex1_start = match.forward[i] vertex1_stop = match.forward[(i+self.size//2)%self.size] paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop)) if len(paths) != 2: #print "Even ring must have two paths between opposite vertices" return False for path in paths: if len(path) != self.size//2+1: #print "Paths between opposite vertices must be half the size of the ring+1" return False else: # odd ring for i in range(self.size//2+1): vertex1_start = match.forward[i] vertex1_stop = match.forward[(i+self.size//2)%self.size] paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop)) if len(paths) > 1: return False if len(paths[0]) != self.size//2+1: return False vertex1_stop = match.forward[(i+self.size//2+1)%self.size] paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop)) if len(paths) > 1: return False if len(paths[0]) != self.size//2+1: return False return True
Check the completeness of the ring match
def get_types(obj, **kwargs): """Get the types of an iterable.""" max_iterable_length = kwargs.get('max_iterable_length', 100000) it, = itertools.tee(obj, 1) s = set() too_big = False for i, v in enumerate(it): if i <= max_iterable_length: s.add(type(v)) else: too_big = True break return {"types": s, "too_big": too_big}
Get the types of an iterable.
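A short standalone run of the type sampler above (restated verbatim), showing both the normal case and the too_big cutoff:

import itertools

def get_types(obj, **kwargs):
    max_iterable_length = kwargs.get('max_iterable_length', 100000)
    it, = itertools.tee(obj, 1)
    s = set()
    too_big = False
    for i, v in enumerate(it):
        if i <= max_iterable_length:
            s.add(type(v))
        else:
            too_big = True
            break
    return {"types": s, "too_big": too_big}

print(get_types([1, 'a', 2.0]))                     # types {int, str, float}, too_big False
print(get_types(range(10), max_iterable_length=3))  # stops early, too_big True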
def get(self, id): """ Get data by id. :param id: the id of the data to fetch :return: the fetched data, or an empty ``dict`` if nothing is found """ document = self._get_document(id) if document: session_json = document["session"] return json_loads(session_json) return {}
Get data by id. :param id: the id of the data to fetch :return: the fetched data, or an empty ``dict`` if nothing is found
def OnFind(self, event): """Find functionality, called from toolbar, returns find position""" # Search starts in next cell after the current one gridpos = list(self.grid.actions.cursor) text, flags = event.text, event.flags findpos = self.grid.actions.find(gridpos, text, flags) if findpos is None: # If nothing is found mention it in the statusbar and return statustext = _("'{text}' not found.").format(text=text) else: # Otherwise select cell with next occurrence if successful self.grid.actions.cursor = findpos # Update statusbar statustext = _(u"Found '{text}' in cell {key}.") statustext = statustext.format(text=text, key=findpos) post_command_event(self.grid.main_window, self.grid.StatusBarMsg, text=statustext) event.Skip()
Find functionality, called from toolbar, returns find position
def paste_from_clipboard(self): """ Pastes files from clipboard. """ to = self.get_current_path() if os.path.isfile(to): to = os.path.abspath(os.path.join(to, os.pardir)) mime = QtWidgets.QApplication.clipboard().mimeData() paste_operation = None if mime.hasFormat(self._UrlListMimeData.format(copy=True)): paste_operation = True elif mime.hasFormat(self._UrlListMimeData.format(copy=False)): paste_operation = False if paste_operation is not None: self._paste( self._UrlListMimeData.list_from(mime, copy=paste_operation), to, copy=paste_operation)
Pastes files from clipboard.
def DOM_setOuterHTML(self, nodeId, outerHTML): """ Function path: DOM.setOuterHTML Domain: DOM Method name: setOuterHTML Parameters: Required arguments: 'nodeId' (type: NodeId) -> Id of the node to set markup for. 'outerHTML' (type: string) -> Outer HTML markup to set. No return value. Description: Sets node HTML markup, returns new node id. """ assert isinstance(outerHTML, (str,) ), "Argument 'outerHTML' must be of type '['str']'. Received type: '%s'" % type( outerHTML) subdom_funcs = self.synchronous_command('DOM.setOuterHTML', nodeId=nodeId, outerHTML=outerHTML) return subdom_funcs
Function path: DOM.setOuterHTML Domain: DOM Method name: setOuterHTML Parameters: Required arguments: 'nodeId' (type: NodeId) -> Id of the node to set markup for. 'outerHTML' (type: string) -> Outer HTML markup to set. No return value. Description: Sets node HTML markup, returns new node id.
def initial_state(self) -> StateTensor: '''Returns the initial state tensor.''' s0 = [] for fluent in self._compiler.compile_initial_state(self._batch_size): s0.append(self._output_size(fluent)) s0 = tuple(s0) return s0
Returns the initial state tensor.
def set_level(self, position, channel=None): """Seek a specific value by specifying a float() from 0.0 to 1.0.""" try: position = float(position) except Exception as err: LOG.debug("HelperLevel.set_level: Exception %s" % (err,)) return False self.writeNodeData("LEVEL", position, channel)
Seek a specific value by specifying a float() from 0.0 to 1.0.
def upload_directory_contents(input_dict, environment_dict): """This function serves to upload every file in a user-supplied source directory to all of the vessels in the current target group. It essentially calls seash's `upload` function repeatedly, each time with a file name taken from the source directory. A note on the input_dict argument: `input_dict` contains our own `command_dict` (see below), with the `"[ARGUMENT]"` sub-key of `children` renamed to what argument the user provided. In our case, this will be the source dir to read from. (If not, this is an error!) """ # Check user input and seash state: # 1, Make sure there is an active user key. if environment_dict["currentkeyname"] is None: raise seash_exceptions.UserError("""Error: Please set an identity before using 'uploaddir'! Example: !> loadkeys your_user_name !> as your_user_name your_user_name@ !> """) # 2, Make sure there is a target to work on. if environment_dict["currenttarget"] is None: raise seash_exceptions.UserError("""Error: Please set a target to work on before using 'uploaddir'! Example your_user_name@ !> on browsegood your_user_name@browsegood !> """) # 3, Complain if we don't have a source dir argument try: source_directory = input_dict["uploaddir"]["children"].keys()[0] except IndexError: raise seash_exceptions.UserError("""Error: Missing operand to 'uploaddir' Please specify which source directory's contents you want uploaded, e.g. your_user_name@browsegood !> uploaddir a_local_directory """) # Sanity check: Does the source dir exist? if not os.path.exists(source_directory): raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' does not exist.") # Sanity check: Is the source dir a directory? if not os.path.isdir(source_directory): raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' is not a directory.\nDid you mean to use the 'upload' command instead?") # Alright --- user input and seash state seem sane, let's do the work! # These are the files we will need to upload: file_list = os.listdir(source_directory) for filename in file_list: # We construct the filename-to-be uploaded from the source dir, # the OS-specific path separator, and the actual file name. # This is enough for `upload_target` to find the file. path_and_filename = source_directory + os.sep + filename if not os.path.isdir(path_and_filename): print "Uploading '" + path_and_filename + "'..." # Construct an input_dict containing command args for seash's # `upload FILENAME` function. # XXX There might be a cleaner way to do this. faked_input_dict = {"upload": {"name": "upload", "children": {path_and_filename: {"name": "filename"}}}} command_callbacks.upload_filename(faked_input_dict, environment_dict) else: print "Skipping sub-directory '" + filename + "'. You may upload it separately."
This function serves to upload every file in a user-supplied source directory to all of the vessels in the current target group. It essentially calls seash's `upload` function repeatedly, each time with a file name taken from the source directory. A note on the input_dict argument: `input_dict` contains our own `command_dict` (see below), with the `"[ARGUMENT]"` sub-key of `children` renamed to what argument the user provided. In our case, this will be the source dir to read from. (If not, this is an error!)
def randomize(vm, length=(10,10), ints=(0,999), strs=(1,10), chars=(32,126), instruction_ratio=0.5, number_string_ratio=0.8, exclude=map(crianza.instructions.lookup, [".", "exit", "read", "write", "str"]), restrict_to=None): """Replaces existing code with completely random instructions. Does not optimize code after generating it. Args: length: Tuple of minimum and maximum code lengths. Code length will be a random number between these two, inclusive values. ints: Integers in the code will be selected at random from this inclusive range. strs: Inclusive range of the length of strings in the code. chars: Inclusive range of characters in random strings. instruction_ratio: Ratio of instructions to numbers/strings, meaning that if this value is 0.5 then there will just as many instructions in the code as there are numbers and strings. number_string_ratio: Ratio of numbers to strings. exclude: Excluded instructions. For genetic programming, one wants to avoid the program to hang for user input. The default value is to exclude console i/o and debug instructions. restrict_to: Limit instructions to the given list. Returns: The VM. """ vm.code = [] instructions = set(vm.instructions.values()) - set(exclude) if restrict_to is not None: instructions = instructions.intersection(set(restrict_to)) instructions = list(instructions) for _ in xrange(random.randint(*length)): r = random.random() if r <= instruction_ratio: # Generate a random instruction vm.code.append(random.choice(instructions)) elif r <= number_string_ratio: # Generate a random number vm.code.append(crianza.compiler.make_embedded_push(random.randint(*ints))) else: # Generate a random string vm.code.append(crianza.compiler.make_embedded_push('%s' % "".join(chr(random.randint(*chars)) for n in xrange(0, random.randint(*strs))))) return vm
Replaces existing code with completely random instructions. Does not optimize code after generating it. Args: length: Tuple of minimum and maximum code lengths. Code length will be a random number between these two, inclusive values. ints: Integers in the code will be selected at random from this inclusive range. strs: Inclusive range of the length of strings in the code. chars: Inclusive range of characters in random strings. instruction_ratio: Ratio of instructions to numbers/strings, meaning that if this value is 0.5 then there will just as many instructions in the code as there are numbers and strings. number_string_ratio: Ratio of numbers to strings. exclude: Excluded instructions. For genetic programming, one wants to avoid the program to hang for user input. The default value is to exclude console i/o and debug instructions. restrict_to: Limit instructions to the given list. Returns: The VM.
def remove_unnecessary_whitespace(css): """Remove unnecessary whitespace characters.""" def pseudoclasscolon(css): """ Prevents 'p :link' from becoming 'p:link'. Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is translated back again later. """ regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)") match = regex.search(css) while match: css = ''.join([ css[:match.start()], match.group().replace(":", "___PSEUDOCLASSCOLON___"), css[match.end():]]) match = regex.search(css) return css css = pseudoclasscolon(css) # Remove spaces from before things. css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css) # If there is a `@charset`, then only allow one, and move to the beginning. css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css) css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css) # Put the space back in for a few cases, such as `@media screen` and # `(-webkit-min-device-pixel-ratio:0)`. css = re.sub(r"\band\(", "and (", css) # Put the colons back. css = css.replace('___PSEUDOCLASSCOLON___', ':') # Remove spaces from after things. css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css) return css
Remove unnecessary whitespace characters.
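One of the passes above in isolation, as a quick sanity check: the "spaces before punctuation" substitution (the full function layers several such passes around the pseudo-class guard):

import re

css = "a , b { color : red ; }"
print(re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css))  # 'a, b{ color: red; }'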
def prepare(path, name): # type: (str, str) -> None """Prepare a Python script (or module) to be imported as a module. If the script does not contain a setup.py file, it creates a minimal setup. Args: path (str): path to directory with the script or module. name (str): name of the script or module. """ setup_path = os.path.join(path, 'setup.py') if not os.path.exists(setup_path): data = textwrap.dedent(""" from setuptools import setup setup(packages=[''], name="%s", version='1.0.0', include_package_data=True) """ % name) logger.info('Module %s does not provide a setup.py. \nGenerating setup.py' % name) _files.write_file(setup_path, data) data = textwrap.dedent(""" [wheel] universal = 1 """) logger.info('Generating setup.cfg') _files.write_file(os.path.join(path, 'setup.cfg'), data) data = textwrap.dedent(""" recursive-include . * recursive-exclude . __pycache__* recursive-exclude . *.pyc recursive-exclude . *.pyo """) logger.info('Generating MANIFEST.in') _files.write_file(os.path.join(path, 'MANIFEST.in'), data)
Prepare a Python script (or module) to be imported as a module. If the script does not contain a setup.py file, it creates a minimal setup. Args: path (str): path to directory with the script or module. name (str): name of the script or module.
def hasFeature(self, prop, check_softs=False): """Return if there is a property with that name.""" return prop in self.props or (check_softs and any([fs.hasFeature(prop) for fs in self.props.get(SoftFeatures.SOFT, [])]))
Return if there is a property with that name.
def rm(filename, recursive=False, force=False): """Removes a file or directory tree.""" return auto(remove_file, filename, recursive, force)
Removes a file or directory tree.
def _get_map_from_user_by_id(self, user, map_id): """ Get a mapfile owned by a user from the database by map_id. """ req = Session.query(Map).select_from(join(Map, User)) try: return req.filter(and_(User.login==user, Map.id==map_id)).one() except Exception: return None
Get a mapfile owned by a user from the database by map_id.
def named_module(name): """Returns a module given its name.""" module = __import__(name) packages = name.split(".")[1:] m = module for p in packages: m = getattr(m, p) return m
Returns a module given its name.
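A quick standalone check of the dotted-name walk above; __import__('os.path') returns the top-level os package, so the loop descends attribute by attribute (importlib.import_module covers similar ground):

def named_module(name):
    module = __import__(name)  # returns the top-level package
    for p in name.split(".")[1:]:
        module = getattr(module, p)  # walk down to the requested submodule
    return module

print(named_module("os.path").join("a", "b"))  # 'a/b' (or 'a\\b' on Windows)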
async def on_isupport_maxbans(self, value): """ Maximum entries in ban list. Replaced by MAXLIST. """ if 'MAXLIST' not in self._isupport: if not self._list_limits: self._list_limits = {} self._list_limits['b'] = int(value)
Maximum entries in ban list. Replaced by MAXLIST.
def rshares_to_steem(self, rshares): ''' Gets the reward pool balances, then converts rshares to STEEM. ''' self.reward_pool_balances() return round( rshares * self.reward_balance / self.recent_claims * self.base, 4)
Gets the reward pool balances, then converts rshares to STEEM.
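The payout arithmetic above as a standalone calculation; reward_balance, recent_claims, and base are fetched from the chain in the real class, so the pool numbers below are purely illustrative:

rshares = 1_000_000_000
reward_balance = 840_000.0             # hypothetical pool balance (STEEM)
recent_claims = 400_000_000_000_000.0  # hypothetical recent claims
base = 0.25                            # hypothetical price feed base

payout = round(rshares * reward_balance / recent_claims * base, 4)
print(payout)  # 0.525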
def update(self): """ The function to draw a new frame for the particle system. """ # Spawn new particles if required if self.time_left > 0: self.time_left -= 1 for _ in range(self._count): new_particle = self._new_particle() if new_particle is not None: self.particles.append(new_particle) # Now draw them all for particle in self.particles: # Clear out the old particle last = particle.last() if last is not None: char, x, y, fg, attr, bg = last screen_data = self._screen.get_from(x, y) if self._blend and screen_data: index = self._find_colour(particle, 0, screen_data) - 1 fg, attr, bg = particle.colours[max(index, 0)] self._screen.print_at(" ", x, y, fg, attr, bg) if particle.time < particle.life_time: # Draw the new one char, x, y, fg, attr, bg = particle.next() screen_data = self._screen.get_from(x, y) if self._blend and screen_data: index = self._find_colour(particle, -1, screen_data) + 1 fg, attr, bg = \ particle.colours[min(index, len(particle.colours) - 1)] self._screen.print_at(char, x, y, fg, attr, bg) else: self.particles.remove(particle)
The function to draw a new frame for the particle system.
def gevent_worker(self): """ Process one task after another by calling the handler (`copy_file` or `copy_link`) method of the super class. """ while not self.task_queue.empty(): task_kwargs = self.task_queue.get() handler_type = task_kwargs.pop('handler_type') if handler_type == 'link': super(Command, self).link_file(**task_kwargs) else: super(Command, self).copy_file(**task_kwargs)
Process one task after another by calling the handler (`copy_file` or `copy_link`) method of the super class.
def getresource(self, schemacls, name): """Get a resource from a builder name. :param type schemacls: expected schema class. :param str name: builder name to use. :return: resource returned by the right builder.getresource(schema). """ return _SCHEMAFACTORY.getresource(schemacls=schemacls, name=name)
Get a resource from a builder name. :param type schemacls: waited schema class. :param str name: builder name to use. :return: resource returned by the right builder.getresource(schema).
def data_from_cli(opts): """Loads the data needed for a model from the given command-line options. Gates specified on the command line are also applied. Parameters ---------- opts : ArgumentParser parsed args Argument options parsed from a command line string (the sort of thing returned by `parser.parse_args`). Returns ------- strain_dict : dict Dictionary of instruments -> `TimeSeries` strain. stilde_dict : dict Dictionary of instruments -> `FrequencySeries` strain. psd_dict : dict Dictionary of instruments -> `FrequencySeries` psds. """ # get gates to apply gates = gates_from_cli(opts) psd_gates = psd_gates_from_cli(opts) # get strain time series instruments = opts.instruments if opts.instruments is not None else [] strain_dict = strain_from_cli_multi_ifos(opts, instruments, precision="double") # apply gates if not waiting to overwhiten if not opts.gate_overwhitened: strain_dict = apply_gates_to_td(strain_dict, gates) # get strain time series to use for PSD estimation # if user has not given the PSD time options then use same data as analysis if opts.psd_start_time and opts.psd_end_time: logging.info("Will generate a different time series for PSD " "estimation") psd_opts = opts psd_opts.gps_start_time = psd_opts.psd_start_time psd_opts.gps_end_time = psd_opts.psd_end_time psd_strain_dict = strain_from_cli_multi_ifos(psd_opts, instruments, precision="double") # apply any gates logging.info("Applying gates to PSD data") psd_strain_dict = apply_gates_to_td(psd_strain_dict, psd_gates) elif opts.psd_start_time or opts.psd_end_time: raise ValueError("Must give --psd-start-time and --psd-end-time") else: psd_strain_dict = strain_dict # FFT strain and save each of the length of the FFT, delta_f, and # low frequency cutoff to a dict stilde_dict = {} length_dict = {} delta_f_dict = {} low_frequency_cutoff_dict = low_frequency_cutoff_from_cli(opts) for ifo in instruments: stilde_dict[ifo] = strain_dict[ifo].to_frequencyseries() length_dict[ifo] = len(stilde_dict[ifo]) delta_f_dict[ifo] = stilde_dict[ifo].delta_f # get PSD as frequency series psd_dict = psd_from_cli_multi_ifos( opts, length_dict, delta_f_dict, low_frequency_cutoff_dict, instruments, strain_dict=psd_strain_dict, precision="double") # apply any gates to overwhitened data, if desired if opts.gate_overwhitened and opts.gate is not None: logging.info("Applying gates to overwhitened data") # overwhiten the data for ifo in gates: stilde_dict[ifo] /= psd_dict[ifo] stilde_dict = apply_gates_to_fd(stilde_dict, gates) # unwhiten the data for the model for ifo in gates: stilde_dict[ifo] *= psd_dict[ifo] return strain_dict, stilde_dict, psd_dict
Loads the data needed for a model from the given command-line options. Gates specified on the command line are also applied. Parameters ---------- opts : ArgumentParser parsed args Argument options parsed from a command line string (the sort of thing returned by `parser.parse_args`). Returns ------- strain_dict : dict Dictionary of instruments -> `TimeSeries` strain. stilde_dict : dict Dictionary of instruments -> `FrequencySeries` strain. psd_dict : dict Dictionary of instruments -> `FrequencySeries` psds.
def get_installed_extjs_apps(): """ Get all installed extjs apps. :return: List of ``(appdir, module, appname)``. """ installed_apps = [] checked = set() for app in settings.INSTALLED_APPS: if not app.startswith('django.') and not app in checked: checked.add(app) try: installed_apps.append(get_appinfo(app)) except LookupError, e: pass return installed_apps
Get all installed extjs apps. :return: List of ``(appdir, module, appname)``.
def parse_date(date, default=None): """ Parse a valid date """ if date == "": if default is not None: return default else: raise Exception("Unknown format for " + date) for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H", "%d/%m/%Y"]: try: return datetime.strptime(date, format_type) except ValueError: pass raise Exception("Unknown format for " + date)
Parse a valid date
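The function above is a try-each-format loop around `datetime.strptime`; a minimal standalone sketch of the same pattern (format list shortened for illustration):

    from datetime import datetime

    def parse_date_sketch(text, formats=("%Y-%m-%d %H:%M:%S", "%d/%m/%Y")):
        for fmt in formats:
            try:
                return datetime.strptime(text, fmt)  # first matching format wins
            except ValueError:
                continue
        raise ValueError("Unknown format for " + text)

    assert parse_date_sketch("30/01/2014") == datetime(2014, 1, 30)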
def predict_cumulative_hazard(self, X, times=None, ancillary_X=None): """ Return the cumulative hazard rate of subjects in X at time points. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- cumulative_hazard_ : DataFrame the cumulative hazard of individuals over the timeline """ times = coalesce(times, self.timeline, np.unique(self.durations)) alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X) return pd.DataFrame(np.log1p(np.outer(times, 1 / alpha_) ** beta_), columns=_get_index(X), index=times)
Return the cumulative hazard rate of subjects in X at time points. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- cumulative_hazard_ : DataFrame the cumulative hazard of individuals over the timeline
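The `np.log1p(np.outer(times, 1 / alpha_) ** beta_)` expression matches the log-logistic cumulative hazard H(t) = log(1 + (t/alpha)^beta); a quick numeric check of that identity, with arbitrary values:

    import numpy as np

    alpha, beta, t = 2.0, 3.0, 4.0
    H = np.log1p((t / alpha) ** beta)  # log(1 + (4/2)**3) = log(9)
    assert np.isclose(H, np.log(9.0))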
def get_art(cache_dir, size, client): """Get the album art.""" song = client.currentsong() if len(song) < 2: print("album: Nothing currently playing.") return file_name = f"{song['artist']}_{song['album']}_{size}.jpg".replace("/", "") file_name = cache_dir / file_name if file_name.is_file(): shutil.copy(file_name, cache_dir / "current.jpg") print("album: Found cached art.") else: print("album: Downloading album art...") brainz.init() album_art = brainz.get_cover(song, size) if album_art: util.bytes_to_file(album_art, cache_dir / file_name) util.bytes_to_file(album_art, cache_dir / "current.jpg") print(f"album: Swapped art to {song['artist']}, {song['album']}.")
Get the album art.
def get_interface(interface): """Support Centos standard physical interface, such as eth0. """ # Supported CentOS Version supported_dists = ['7.0', '6.5'] def format_centos_7_0(inf): pattern = r'<([A-Z]+)' state = re.search(pattern, stdout[0]).groups()[0] state = 'UP' if not cmp(state, 'UP') else 'DOWN' inf.state = state stdout.pop(0) pattern = r'inet\s(.*)\s\snetmask\s(.*)\s\sbroadcast\s(.*)' for line in stdout: if line.startswith('inet '): tmp = re.search(pattern, line).groups() (inf.inet, inf.netmask, inf.broadcast) = tmp stdout.remove(line) break for line in stdout: if line.startswith('ether'): inf.ether = line[6:23] break return stdcode, '', inf.make_dict() def format_centos_6_5(inf): pattern = r'HWaddr\s(.*)' inf.ether = re.search(pattern, stdout[0]).groups()[0] stdout.pop(0) pattern = r'addr:(.*)\s\sBcast:(.*)\s\sMask:(.*)' for line in stdout: if line.startswith('inet '): tmp = re.search(pattern, line).groups() (inf.inet, inf.broadcast, inf.netmask) = tmp stdout.remove(line) break inf.state = 'DOWN' for line in stdout: if 'RUNNING' in line: state = line[:2] state = 'UP' if not cmp(state, 'UP') else 'DOWN' inf.state = state break return stdcode, '', inf.make_dict() linux_dist = platform.linux_distribution()[1][:3] if linux_dist in supported_dists: try: cmd = ['ifconfig', interface] stdcode, stdout = execute(cmd) inf = resource.Interface(interface) if not cmp(linux_dist, '6.5'): return format_centos_6_5(inf) elif not cmp(linux_dist, '7.0'): return format_centos_7_0(inf) except Exception as e: message = stdout.pop(0) return stdcode, message, None # Unsupported OS distribute message = 'Unsupported OS distribute %s, only support for CentOS %s.' message = message % (linux_dist, str(supported_dists)) return 1, message, None
Support Centos standard physical interface, such as eth0.
def crunch_dir(name, n=50): """Puts "..." in the middle of a directory name if length > n.""" if len(name) > n + 3: name = "..." + name[-n:] return name
Puts "..." in the middle of a directory name if length > n.
def alias_function(function, class_name): """Create a RedditContentObject function mapped to a BaseReddit function. The BaseReddit classes define the majority of the API's functions. The first argument for many of these functions is the RedditContentObject that they operate on. This factory returns functions appropriate to be called on a RedditContent object that maps to the corresponding BaseReddit function. """ @wraps(function) def wrapped(self, *args, **kwargs): func_args = _make_func_args(function) if 'subreddit' in func_args and func_args.index('subreddit') != 1: # Only happens for search kwargs['subreddit'] = self return function(self.reddit_session, *args, **kwargs) else: return function(self.reddit_session, self, *args, **kwargs) # Only grab the short-line doc and add a link to the complete doc if wrapped.__doc__ is not None: wrapped.__doc__ = wrapped.__doc__.split('\n', 1)[0] wrapped.__doc__ += ('\n\nSee :meth:`.{0}.{1}` for complete usage. ' 'Note that you should exclude the subreddit ' 'parameter when calling this convenience method.' .format(class_name, function.__name__)) # Don't hide from sphinx as this is a parameter modifying decorator return wrapped
Create a RedditContentObject function mapped to a BaseReddit function. The BaseReddit classes define the majority of the API's functions. The first argument for many of these functions is the RedditContentObject that they operate on. This factory returns functions appropriate to be called on a RedditContent object that maps to the corresponding BaseReddit function.
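A stripped-down sketch of the same factory pattern, with hypothetical `Session`/`Thing` classes standing in for BaseReddit and RedditContentObject:

    from functools import wraps

    class Session:
        def greet(self, obj):
            """Return a greeting for obj."""
            return "hello " + obj.name

    def make_alias(function):
        @wraps(function)
        def wrapped(self, *args, **kwargs):
            # forward to the session-level function, passing the content
            # object itself as the second positional argument
            return function(self.session, self, *args, **kwargs)
        return wrapped

    class Thing:
        greet = make_alias(Session.greet)
        def __init__(self, session, name):
            self.session, self.name = session, name

    print(Thing(Session(), "world").greet())  # hello world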
def fo_pct_by_zone(self): """ Get the by-team face-off win % by zone. :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` """ bz = self.by_zone return { t: { z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0 for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
Get the by-team face-off win % by zone. :returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
def update_member_profile(self, brief_details, profile_details): ''' a method to update user profile details on meetup :param brief_details: dictionary with member brief details with updated values :param profile_details: dictionary with member profile details with updated values :return: dictionary with partial profile details inside [json] key ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#edit title = '%s.update_member_profile' % self.__class__.__name__ # validate permissions if 'profile_edit' not in self.service_scope: raise ValueError('%s requires profile_edit as part of oauth2 service_scope permissions.' % title) # validate inputs brief_details = self.objects.profile_brief.validate(brief_details) profile_details = self.objects.profile.validate(profile_details) # construct request fields url = '%s/members/%s' % (self.endpoint, str(profile_details['id'])) params = { 'bio': profile_details['bio'], 'bio_privacy': profile_details['privacy']['bio'], 'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats', 'gender': profile_details['gender'], 'groups_privacy': profile_details['privacy']['groups'], 'lang': brief_details['lang'].replace('_', '-'), 'lat': str(profile_details['lat']), 'lon': str(profile_details['lon']), 'messaging_pref': profile_details['messaging_pref'], 'name': profile_details['name'], 'photo_id': profile_details['photo']['id'], 'sync_photo': True, 'topics_privacy': profile_details['privacy']['topics'], 'zip': brief_details['zip'] } if profile_details['privacy']['facebook']: params['facebook_privacy'] = profile_details['privacy']['facebook'] birthday_value = False for key, value in profile_details['birthday'].items(): if value: birthday_value = True break if not birthday_value: params['birthday'] = '-1' else: birthday_string = '' b_day = profile_details['birthday'] if b_day['day'] and b_day['month']: if b_day['month'] < 10: birthday_string += '0' birthday_string += str(b_day['month']) if b_day['day'] < 10: birthday_string += '0' birthday_string += str(b_day['day']) birthday_string += str(b_day['year']) params['birthday'] = birthday_string # send requests profile_details = self._patch_request(url, params=params) return profile_details
a method to update user profile details on meetup :param brief_details: dictionary with member brief details with updated values :param profile_details: dictionary with member profile details with updated values :return: dictionary with partial profile details inside [json] key
def present(name, properties=None, filesystem_properties=None, layout=None, config=None): ''' ensure storage pool is present on the system name : string name of storage pool properties : dict optional set of properties to set for the storage pool filesystem_properties : dict optional set of filesystem properties to set for the storage pool (creation only) layout: dict disk layout to use if the pool does not exist (creation only) config : dict fine grain control over this state .. note:: The following configuration properties can be toggled in the config parameter. - import (true) - try to import the pool before creating it if absent - import_dirs (None) - specify additional locations to scan for devices on import (comma-separated) - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for non-absolute device paths - force (false) - try to force the import or creation .. note:: It is no longer needed to give a unique name to each top-level vdev, the old layout format is still supported but no longer recommended. .. code-block:: yaml - mirror: - /tmp/vdisk3 - /tmp/vdisk2 - mirror: - /tmp/vdisk0 - /tmp/vdisk1 The above yaml will always result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 .. warning:: The legacy format is also still supported but not recommended, because IDs inside the layout dict must be unique, they need to have a suffix. .. code-block:: yaml mirror-0: /tmp/vdisk3 /tmp/vdisk2 mirror-1: /tmp/vdisk0 /tmp/vdisk1 .. warning:: Pay attention to the order of your dict! .. code-block:: yaml - mirror: - /tmp/vdisk0 - /tmp/vdisk1 - /tmp/vdisk2 The above will result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2 Creating a 3-way mirror! While you probably expect it to be mirror root vdev with 2 devices + a root vdev of 1 device! ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} # config defaults default_config = { 'import': True, 'import_dirs': None, 'device_dir': None, 'force': False } if __grains__['kernel'] == 'SunOS': default_config['device_dir'] = '/dev/dsk' elif __grains__['kernel'] == 'Linux': default_config['device_dir'] = '/dev' # merge state config if config: default_config.update(config) config = default_config # ensure properties are zfs values if properties: properties = __utils__['zfs.from_auto_dict'](properties) elif properties is None: properties = {} if filesystem_properties: filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties) elif filesystem_properties is None: filesystem_properties = {} # parse layout vdevs = _layout_to_vdev(layout, config['device_dir']) if vdevs: vdevs.insert(0, name) # log configuration log.debug('zpool.present::%s::config - %s', name, config) log.debug('zpool.present::%s::vdevs - %s', name, vdevs) log.debug('zpool.present::%s::properties - %s', name, properties) log.debug('zpool.present::%s::filesystem_properties - %s', name, filesystem_properties) # ensure the pool is present ret['result'] = False # don't do anything because this is a test if __opts__['test']: ret['result'] = True if __salt__['zpool.exists'](name): ret['changes'][name] = 'uptodate' else: ret['changes'][name] = 'imported' if config['import'] else 'created' ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name]) # update pool elif __salt__['zpool.exists'](name): ret['result'] = True # fetch current pool properties properties_current = __salt__['zpool.get'](name, parsable=True) # build list of properties to update properties_update = [] if properties: for prop in properties: # skip unexisting properties if prop not in properties_current: log.warning('zpool.present::%s::update - unknown property: %s', name, prop) continue # compare current and wanted value if properties_current[prop] != properties[prop]: properties_update.append(prop) # update pool properties for prop in properties_update: res = __salt__['zpool.set'](name, prop, properties[prop]) if res['set']: if name not in ret['changes']: ret['changes'][name] = {} ret['changes'][name][prop] = properties[prop] else: ret['result'] = False if ret['comment'] == '': ret['comment'] = 'The following properties were not updated:' ret['comment'] = '{0} {1}'.format(ret['comment'], prop) if ret['result']: ret['comment'] = 'properties updated' if ret['changes'] else 'no update needed' # import or create the pool (at least try to anyway) else: # import pool if config['import']: mod_res = __salt__['zpool.import']( name, force=config['force'], dir=config['import_dirs'], ) ret['result'] = mod_res['imported'] if ret['result']: ret['changes'][name] = 'imported' ret['comment'] = 'storage pool {0} was imported'.format(name) # create pool if not ret['result'] and vdevs: log.debug('zpool.present::%s::creating', name) # execute zpool.create mod_res = __salt__['zpool.create']( *vdevs, force=config['force'], properties=properties, filesystem_properties=filesystem_properties ) ret['result'] = mod_res['created'] if ret['result']: ret['changes'][name] = 'created' ret['comment'] = 'storage pool {0} was created'.format(name) elif 'error' in mod_res: ret['comment'] = mod_res['error'] else: ret['comment'] = 'could not create storage pool {0}'.format(name) # give up, we cannot import the pool and we do not have a layout to create it if not ret['result'] and not vdevs: ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name) return ret
ensure storage pool is present on the system name : string name of storage pool properties : dict optional set of properties to set for the storage pool filesystem_properties : dict optional set of filesystem properties to set for the storage pool (creation only) layout: dict disk layout to use if the pool does not exist (creation only) config : dict fine grain control over this state .. note:: The following configuration properties can be toggled in the config parameter. - import (true) - try to import the pool before creating it if absent - import_dirs (None) - specify additional locations to scan for devices on import (comma-separated) - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for non-absolute device paths - force (false) - try to force the import or creation .. note:: It is no longer needed to give a unique name to each top-level vdev, the old layout format is still supported but no longer recommended. .. code-block:: yaml - mirror: - /tmp/vdisk3 - /tmp/vdisk2 - mirror: - /tmp/vdisk0 - /tmp/vdisk1 The above yaml will always result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 .. warning:: The legacy format is also still supported but not recommended, because IDs inside the layout dict must be unique, they need to have a suffix. .. code-block:: yaml mirror-0: /tmp/vdisk3 /tmp/vdisk2 mirror-1: /tmp/vdisk0 /tmp/vdisk1 .. warning:: Pay attention to the order of your dict! .. code-block:: yaml - mirror: - /tmp/vdisk0 - /tmp/vdisk1 - /tmp/vdisk2 The above will result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2 Creating a 3-way mirror! While you probably expect it to be mirror root vdev with 2 devices + a root vdev of 1 device!
def sigma_cached(self, psd): """ Cache sigma calculate for use in tandem with the FilterBank class """ if not hasattr(self, '_sigmasq'): from pycbc.opt import LimitedSizeDict self._sigmasq = LimitedSizeDict(size_limit=2**5) key = id(psd) if not hasattr(psd, '_sigma_cached_key'): psd._sigma_cached_key = {} if key not in self._sigmasq or id(self) not in psd._sigma_cached_key: psd._sigma_cached_key[id(self)] = True # If possible, we precalculate the sigmasq vector for all possible waveforms if pycbc.waveform.waveform_norm_exists(self.approximant): if not hasattr(psd, 'sigmasq_vec'): psd.sigmasq_vec = {} if self.approximant not in psd.sigmasq_vec: psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm( self.approximant, psd, len(psd), psd.delta_f, self.f_lower) if not hasattr(self, 'sigma_scale'): # Get an amplitude normalization (mass dependant constant norm) amp_norm = pycbc.waveform.get_template_amplitude_norm( self.params, approximant=self.approximant) amp_norm = 1 if amp_norm is None else amp_norm self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0 self._sigmasq[key] = self.sigma_scale * \ psd.sigmasq_vec[self.approximant][self.end_idx-1] else: if not hasattr(self, 'sigma_view'): from pycbc.filter.matchedfilter import get_cutoff_indices N = (len(self) -1) * 2 kmin, kmax = get_cutoff_indices( self.min_f_lower or self.f_lower, self.end_frequency, self.delta_f, N) self.sslice = slice(kmin, kmax) self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f if not hasattr(psd, 'invsqrt'): psd.invsqrt = 1.0 / psd[self.sslice] self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt) return self._sigmasq[key]
Cache sigma calculate for use in tandem with the FilterBank class
def get(self, section, option, default=NoDefault): """ Get an option section=None: attribute a default section name default: default value (if not specified, an exception will be raised if option doesn't exist) """ section = self._check_section_option(section, option) if not self.has_section(section): if default is NoDefault: raise cp.NoSectionError(section) else: self.add_section(section) if not self.has_option(section, option): if default is NoDefault: raise cp.NoOptionError(option, section) else: self.set(section, option, default) return default value = cp.ConfigParser.get(self, section, option, raw=self.raw) # Use type of default_value to parse value correctly default_value = self.get_default(section, option) if isinstance(default_value, bool): value = ast.literal_eval(value) elif isinstance(default_value, float): value = float(value) elif isinstance(default_value, int): value = int(value) elif is_text_string(default_value): if PY2: try: value = value.decode('utf-8') try: # Some str config values expect to be eval after decoding new_value = ast.literal_eval(value) if is_text_string(new_value): value = new_value except (SyntaxError, ValueError): pass except (UnicodeEncodeError, UnicodeDecodeError): pass else: try: # lists, tuples, ... value = ast.literal_eval(value) except (SyntaxError, ValueError): pass return value
Get an option section=None: attribute a default section name default: default value (if not specified, an exception will be raised if option doesn't exist)
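The key trick in `get` above is that configparser stores every value as text, so the type of the default drives the parse back to the right type, with `ast.literal_eval` handling lists, tuples, and booleans; a small standalone illustration:

    import ast

    stored = "['spam', 'eggs']"          # how configparser hands the value back
    value = ast.literal_eval(stored)     # safe literal parse, no eval()
    assert value == ['spam', 'eggs']
    assert ast.literal_eval("True") is True and ast.literal_eval("3.5") == 3.5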
def run_attacks(self): """Method which evaluates all attack work. In a loop this method queries not completed attack work, picks one attack work and runs it. """ logging.info('******** Start evaluation of attacks ********') prev_submission_id = None while True: # wait until work is available self.attack_work.read_all_from_datastore() if not self.attack_work.work: logging.info('Work is not populated, waiting...') time.sleep(SLEEP_TIME) continue if self.attack_work.is_all_work_competed(): logging.info('All attack work completed.') break # download all attacks data and dataset self.fetch_attacks_data() # pick piece of work work_id = self.attack_work.try_pick_piece_of_work( self.worker_id, submission_id=prev_submission_id) if not work_id: logging.info('Failed to pick work, waiting...') time.sleep(SLEEP_TIME_SHORT) continue logging.info('Selected work_id: %s', work_id) # execute work try: elapsed_time_sec, prev_submission_id = self.run_attack_work(work_id) logging.info('Work %s is done', work_id) # indicate that work is completed is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, other_values={'elapsed_time': elapsed_time_sec}) except WorkerError as e: logging.info('Failed to run work:\n%s', str(e)) is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, error=str(e)) if not is_work_update: logging.warning('Can''t update work "%s" as completed by worker %d', work_id, self.worker_id) logging.info('******** Finished evaluation of attacks ********')
Method which evaluates all attack work. In a loop this method queries not completed attack work, picks one attack work and runs it.
def _filter_dict(input_dict, search_key, search_value): ''' Filters a dictionary of dictionaries by a key-value pair. :param input_dict: is a dictionary whose values are lists of dictionaries :param search_key: is the key in the leaf dictionaries :param search_values: is the value in the leaf dictionaries :return: filtered dictionary ''' output_dict = dict() for key, key_list in six.iteritems(input_dict): key_list_filtered = _filter_list(key_list, search_key, search_value) if key_list_filtered: output_dict[key] = key_list_filtered return output_dict
Filters a dictionary of dictionaries by a key-value pair. :param input_dict: is a dictionary whose values are lists of dictionaries :param search_key: is the key in the leaf dictionaries :param search_values: is the value in the leaf dictionaries :return: filtered dictionary
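A sketch of how `_filter_dict` behaves, with a minimal stand-in for the `_filter_list` helper it delegates to (assumed to keep dicts whose `search_key` equals `search_value`):

    def _filter_list_sketch(lst, key, value):
        # assumed behaviour of the _filter_list helper
        return [d for d in lst if d.get(key) == value]

    neighbors = {
        "ge-0/0/0": [{"state": "up"}, {"state": "down"}],
        "ge-0/0/1": [{"state": "down"}],
    }
    filtered = {}
    for iface, entries in neighbors.items():
        kept = _filter_list_sketch(entries, "state", "up")
        if kept:                         # drop keys whose list filters empty
            filtered[iface] = kept
    assert filtered == {"ge-0/0/0": [{"state": "up"}]}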
def _async_recv(self): """No raw bytes should escape from this, all byte encoding and decoding should be handled inside this function""" logging.info("Receive loop started") recbuffer = b"" while not self._stop_event.is_set(): time.sleep(0.01) try: recbuffer = recbuffer + self._socket.recv(1024) data = recbuffer.split(b'\r\n') recbuffer = data.pop() if data: for line in data: self._process_data(line.decode(encoding='UTF-8', errors='ignore')) except BlockingIOError as e: pass logging.info("Receive loop stopped")
No raw bytes should escape from this, all byte encoding and decoding should be handled inside this function
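The buffer handling above is the classic split-and-keep-the-tail idiom for line-framed protocols: split on the delimiter, then pop the last element back into the buffer because it may be an incomplete line. Standalone, with fake chunks:

    recbuffer = b""
    for chunk in (b"PING :ser", b"ver\r\nNOTICE x\r\nPART"):
        recbuffer += chunk
        lines = recbuffer.split(b"\r\n")
        recbuffer = lines.pop()          # possibly-partial tail stays buffered
        for line in lines:
            print(line.decode())
    # prints "PING :server" then "NOTICE x"; b"PART" waits for more data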
def get_static_parent(raml_resource, method=None): """ Get static parent resource of :raml_resource: with HTTP method :method:. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param method: HTTP method name which matching static resource must have. """ parent = raml_resource.parent while is_dynamic_resource(parent): parent = parent.parent if parent is None: return parent match_method = method is not None if match_method: if parent.method.upper() == method.upper(): return parent else: return parent for res in parent.root.resources: if res.path == parent.path: if res.method.upper() == method.upper(): return res
Get static parent resource of :raml_resource: with HTTP method :method:. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param method: HTTP method name which matching static resource must have.
def _sync_binary_dep_links(self, target, gopath, lib_binary_map): """Syncs symlinks under gopath to the library binaries of target's transitive dependencies. :param Target target: Target whose transitive dependencies must be linked. :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links to library binaries. :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the path of the compiled binary (the ".a" file) of the library. Required links to binary dependencies under gopath's "pkg/" dir are either created if non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target are deleted. """ required_links = set() for dep in target.closure(): if dep == target: continue if not isinstance(dep, GoTarget): continue lib_binary = lib_binary_map[dep] lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep))) safe_mkdir(os.path.dirname(lib_binary_link)) if os.path.islink(lib_binary_link): if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime: # The binary under the link was updated after the link was created. Refresh # the link so the mtime (modification time) of the link is greater than the # mtime of the binary. This stops Go from needlessly re-compiling the library. os.unlink(lib_binary_link) os.symlink(lib_binary, lib_binary_link) else: os.symlink(lib_binary, lib_binary_link) required_links.add(lib_binary_link) self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
Syncs symlinks under gopath to the library binaries of target's transitive dependencies. :param Target target: Target whose transitive dependencies must be linked. :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links to library binaries. :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the path of the compiled binary (the ".a" file) of the library. Required links to binary dependencies under gopath's "pkg/" dir are either created if non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target are deleted.
def canonical_interface_name(interface, addl_name_map=None): """Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implemented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"} """ name_map = {} name_map.update(base_interfaces) interface_type, interface_number = split_interface(interface) if isinstance(addl_name_map, dict): name_map.update(addl_name_map) # check in dict for mapping if name_map.get(interface_type): long_int = name_map.get(interface_type) return long_int + py23_compat.text_type(interface_number) # if nothing matched, return the original name else: return interface
Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implemented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}
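A miniature sketch of the expand-by-explicit-map approach, with tiny stand-ins for `base_interfaces` and `split_interface` (both assumed; the real mappings live in the source module):

    base_map = {"Gi": "GigabitEthernet", "Po": "Port-channel"}

    def expand(interface, addl_map=None):
        # split "Gi1/0/1" into ("Gi", "1/0/1") -- stand-in for split_interface
        head = interface.rstrip("0123456789/.:")
        number = interface[len(head):]
        name_map = dict(base_map, **(addl_map or {}))
        long_name = name_map.get(head)
        return long_name + number if long_name else interface  # fall back unchanged

    print(expand("Gi1/0/1"))                      # GigabitEthernet1/0/1
    print(expand("Po1", {"Po": "PortChannel"}))   # PortChannel1
    print(expand("dummy0"))                       # dummy0 (no match, unchanged)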
def convert_clip(node, **kwargs): """Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node. """ name, input_nodes, attrs = get_inputs(node, kwargs) a_min = np.float(attrs.get('a_min', -np.inf)) a_max = np.float(attrs.get('a_max', np.inf)) clip_node = onnx.helper.make_node( "Clip", input_nodes, [name], name=name, min=a_min, max=a_max ) return [clip_node]
Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node.
def on_down(self, host): """ Called by the parent Cluster instance when a node is marked down. Only intended for internal use. """ future = self.remove_pool(host) if future: future.add_done_callback(lambda f: self.update_created_pools())
Called by the parent Cluster instance when a node is marked down. Only intended for internal use.
def _get_pages(page_size, total_records): """ Given a page size (records per page) and a total number of records, return the page numbers to be retrieved. """ pages = total_records//page_size+bool(total_records%page_size) return range(1, pages+1)
Given a page size (records per page) and a total number of records, return the page numbers to be retrieved.
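The `total // size + bool(total % size)` expression is integer ceiling division (the `//` keeps the result an int, so `range` accepts it on Python 3); a quick check against `math.ceil`, with arbitrary numbers:

    import math

    total, size = 101, 25
    pages = total // size + bool(total % size)   # 4 full pages + 1 partial = 5
    assert pages == math.ceil(total / size)
    assert list(range(1, pages + 1)) == [1, 2, 3, 4, 5]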
def parse_xml_data(content, raincontent, latitude=52.091579, longitude=5.119734, timeframe=60): """Parse the raw data and return as data dictionary.""" result = {SUCCESS: False, MESSAGE: None, DATA: None} if timeframe < 5 or timeframe > 120: raise ValueError("Timeframe must be >=5 and <=120.") if content is not None: result = __parse_ws_data(content, latitude, longitude) if result[SUCCESS] and raincontent is not None: data = __parse_precipfc_data(raincontent, timeframe) result[DATA][PRECIPITATION_FORECAST] = data log.debug("Extracted weather-data: %s", result[DATA]) return result
Parse the raw data and return as data dictionary.
def data(offset, bytes): """Return Data record. This constructs the full record, including the length information, the record type (0x00), the checksum, and the offset. @param offset load offset of first byte. @param bytes list of byte values to pack into record. @return String representation of one HEX record """ assert 0 <= offset < 65536 assert 0 < len(bytes) < 256 b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes return Record._from_bytes(b)
Return Data record. This constructs the full record, including the length information, the record type (0x00), the checksum, and the offset. @param offset load offset of first byte. @param bytes list of byte values to pack into record. @return String representation of one HEX record
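For reference, a self-contained sketch of the Intel HEX data-record layout the function emits; `Record._from_bytes` itself is not shown in this row, so the checksum step here is the standard two's-complement rule, stated as an assumption:

    def hex_data_record(offset, data):
        # :LLAAAATT<data>CC -- length, offset, type 0x00, then the checksum,
        # which is the two's complement of the byte sum (standard Intel HEX)
        body = [len(data), (offset >> 8) & 0xFF, offset & 0xFF, 0x00] + data
        checksum = (-sum(body)) & 0xFF
        return ":" + "".join("%02X" % b for b in body + [checksum])

    assert hex_data_record(0x0100, [0x21, 0x46]) == ":02010000214696"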
def store_extra_info(self, key: str, value: Any) -> None: """ Store some extra value in the messaging storage. :param key: key of dictionary entry to add. :param value: value of dictionary entry to add. :returns: None """ self.extra_keys[key] = value
Store some extra value in the messaging storage. :param key: key of dictionary entry to add. :param value: value of dictionary entry to add. :returns: None
def validate_attrs(resource_attr_ids, scenario_id, template_id=None): """ Check that multiple resource attributes satisfy the requirements of the types of resources to which they are attached. """ multi_rs = db.DBSession.query(ResourceScenario).\ filter(ResourceScenario.resource_attr_id.in_(resource_attr_ids),\ ResourceScenario.scenario_id==scenario_id).\ options(joinedload_all("resourceattr")).\ options(joinedload_all("dataset")).all() errors = [] for rs in multi_rs: try: _do_validate_resourcescenario(rs, template_id) except HydraError as e: error = dict( ref_key = rs.resourceattr.ref_key, ref_id = rs.resourceattr.get_resource_id(), ref_name = rs.resourceattr.get_resource().get_name(), resource_attr_id = rs.resource_attr_id, attr_id = rs.resourceattr.attr.id, attr_name = rs.resourceattr.attr.name, dataset_id = rs.dataset_id, scenario_id = scenario_id, template_id = template_id, error_text = e.args[0]) errors.append(error) return errors
Check that multiple resource attributes satisfy the requirements of the types of resources to which they are attached.
def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None): """ Smart method. It does everything to return Instance with given parameters within the application. If instance found running and given parameters are actual: return it. If instance found, but parameters differ - reconfigure instance with new parameters. If instance not found: launch instance with given parameters. Return: Instance object. """ instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval) reconfigure = False # if found: # if revision and revision is not found.revision: # reconfigure = True # if parameters and parameters is not found.parameters: # reconfigure = True # We need to reconfigure instance if reconfigure: instance.reconfigure(revision=revision, parameters=parameters) return instance
Smart method. It does everything to return Instance with given parameters within the application. If instance found running and given parameters are actual: return it. If instance found, but parameters differ - reconfigure instance with new parameters. If instance not found: launch instance with given parameters. Return: Instance object.
def _open_generic_http(self, connection_factory, url, data): """Make an HTTP connection using connection_class. This is an internal method that should be called from open_http() or open_https(). Arguments: - connection_factory should take a host name and return an HTTPConnection instance. - url is the url to retrieval or a host, relative-path pair. - data is payload for a POST request or None. """ user_passwd = None proxy_passwd= None if isinstance(url, str): host, selector = splithost(url) if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host else: host, selector = url # check whether the proxy contains authorization information proxy_passwd, host = splituser(host) # now we proceed with the url we want to obtain urltype, rest = splittype(selector) url = rest user_passwd = None if urltype.lower() != 'http': realhost = None else: realhost, rest = splithost(rest) if realhost: user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) if proxy_bypass(realhost): host = realhost if not host: raise IOError('http error', 'no host given') if proxy_passwd: proxy_passwd = unquote(proxy_passwd) proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') else: proxy_auth = None if user_passwd: user_passwd = unquote(user_passwd) auth = base64.b64encode(user_passwd.encode()).decode('ascii') else: auth = None http_conn = connection_factory(host) headers = {} if proxy_auth: headers["Proxy-Authorization"] = "Basic %s" % proxy_auth if auth: headers["Authorization"] = "Basic %s" % auth if realhost: headers["Host"] = realhost # Add Connection:close as we don't support persistent connections yet. # This helps in closing the socket and avoiding ResourceWarning headers["Connection"] = "close" for header, value in self.addheaders: headers[header] = value if data is not None: headers["Content-Type"] = "application/x-www-form-urlencoded" http_conn.request("POST", selector, data, headers) else: http_conn.request("GET", selector, headers=headers) try: response = http_conn.getresponse() except http_client.BadStatusLine: # something went wrong with the HTTP status line raise URLError("http protocol error: bad status line") # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if 200 <= response.status < 300: return addinfourl(response, response.msg, "http:" + url, response.status) else: return self.http_error( url, response.fp, response.status, response.reason, response.msg, data)
Make an HTTP connection using connection_class. This is an internal method that should be called from open_http() or open_https(). Arguments: - connection_factory should take a host name and return an HTTPConnection instance. - url is the url to retrieval or a host, relative-path pair. - data is payload for a POST request or None.
def hicpro_capture_chart (self): """ Generate Capture Hi-C plot""" keys = OrderedDict() keys['valid_pairs_on_target_cap_cap'] = { 'color': '#0039e6', 'name': 'Capture-Capture interactions' } keys['valid_pairs_on_target_cap_rep'] = { 'color': '#809fff', 'name': 'Capture-Reporter interactions' } keys['valid_pairs_off_target'] = { 'color': '#cccccc', 'name': 'Off-target valid pairs' } # Check capture info are available num_samples = 0 for s_name in self.hicpro_data: for k in keys: num_samples += sum([1 if k in self.hicpro_data[s_name] else 0]) if num_samples == 0: return False # Config for the plot config = { 'id': 'hicpro_cap_plot', 'title': 'HiC-Pro: Capture Statistics', 'ylab': '# Pairs', 'cpswitch_counts_label': 'Number of Pairs' } return bargraph.plot(self.hicpro_data, keys, config)
Generate Capture Hi-C plot
def dispatch_request(self, req): """ Dispatch a request object. """ log.debug("Dispatching request: {}".format(str(req))) # make sure it's valid res = None try: req.validate() except MissingFieldError as e: res = APIMissingFieldErrorResponse(str(e)) # dispatch the request if not res: try: res = req.dispatch() except Exception as e: msg = "Exception raised while dispatching request: {}".format(repr(e)) log.exception(msg) res = APIGenericErrorResponse(msg) log.debug("Response: {}".format(str(res))) return res
Dispatch a request object.
def list_gebouwen_adapter(obj, request): """ Adapter for rendering a list of :class:`crabpy.gateway.crab.Gebouw` to json. """ return { 'id': obj.id, 'aard': { 'id': obj.aard.id, 'naam': obj.aard.naam, 'definitie': obj.aard.definitie }, 'status': { 'id': obj.status.id, 'naam': obj.status.naam, 'definitie': obj.status.definitie } }
Adapter for rendering a list of :class:`crabpy.gateway.crab.Gebouw` to json.
def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.""" if not self.quiet: if self.feedback_to_output: self.poutput(msg) else: self.decolorized_write(sys.stderr, "{}\n".format(msg))
For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.
def load_data(): data_dir = "/Users/annaho/Data/AAOmega" out_dir = "%s/%s" %(data_dir, "Run_13_July") """ Use all the above functions to set data up for The Cannon """ ff, wl, tr_flux, tr_ivar = load_ref_spectra() """ pick one that doesn't have extra dead pixels """ skylines = tr_ivar[4,:] # should be the same across all obj np.savez("%s/skylines.npz" %out_dir, skylines) contmask = np.load("%s/contmask_regions.npz" %data_dir)['arr_0'] scatter = estimate_noise(tr_flux, contmask) ids, labels = load_labels() # Select the objects in the catalog corresponding to the files inds = [] ff_short = [] for fname in ff: val = fname.split("/")[-1] short = (val.split('.')[0] + '.' + val.split('.')[1]) ff_short.append(short) if short in ids: ind = np.where(ids==short)[0][0] inds.append(ind) # choose the labels tr_id = ids[inds] tr_label = labels[inds] # find the corresponding spectra ff_short = np.array(ff_short) inds = np.array([np.where(ff_short==val)[0][0] for val in tr_id]) tr_flux_choose = tr_flux[inds] tr_ivar_choose = tr_ivar[inds] scatter_choose = scatter[inds] np.savez("%s/wl.npz" %out_dir, wl) np.savez("%s/ref_id_all.npz" %out_dir, tr_id) np.savez("%s/ref_flux_all.npz" %out_dir, tr_flux_choose) np.savez("%s/ref_ivar_all.npz" %out_dir, tr_ivar_choose) np.savez("%s/ref_label_all.npz" %out_dir, tr_label) np.savez("%s/ref_spec_scat_all.npz" %out_dir, scatter_choose) # now, the test spectra test_id, test_flux = load_test_spectra() scatter = estimate_noise(test_flux, contmask) np.savez("%s/test_id.npz" %out_dir, test_id) np.savez("%s/test_flux.npz" %out_dir, test_flux) np.savez("%s/test_spec_scat.npz" %out_dir, scatter)
Use all the above functions to set data up for The Cannon
def configure(access_key=None, secret_key=None, logger=None): ''' Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key ''' if not logger: logger = log.get_logger('s3') if not all([access_key, secret_key]): logger.info('') access_key = input('AWS Access Key: ') secret_key = input('AWS Secret Key: ') _write_config(access_key, secret_key) logger.info('') logger.info('Completed writing S3 config file.') logger.info('')
Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key
def get_sources_headers_for_target(self, target): """Return a list of file arguments to provide to the compiler. NB: result list will contain both header and source files! :raises: :class:`NativeCompile.NativeCompileError` if there is an error processing the sources. """ # Get source paths relative to the target base so the exception message with the target and # paths makes sense. target_relative_sources = target.sources_relative_to_target_base() rel_root = target_relative_sources.rel_root # Unique file names are required because we just dump object files into a single directory, and # the compiler will silently just produce a single object file if provided non-unique filenames. # TODO: add some shading to file names so we can remove this check. # NB: It shouldn't matter if header files have the same name, but this will raise an error in # that case as well. We won't need to do any shading of header file names. seen_filenames = defaultdict(list) for src in target_relative_sources: seen_filenames[os.path.basename(src)].append(src) duplicate_filename_err_msgs = [] for fname, source_paths in seen_filenames.items(): if len(source_paths) > 1: duplicate_filename_err_msgs.append("filename: {}, paths: {}".format(fname, source_paths)) if duplicate_filename_err_msgs: raise self.NativeCompileError( "Error in target '{}': source files must have a unique filename within a '{}' target. " "Conflicting filenames:\n{}" .format(target.address.spec, target.alias(), '\n'.join(duplicate_filename_err_msgs))) return [os.path.join(get_buildroot(), rel_root, src) for src in target_relative_sources]
Return a list of file arguments to provide to the compiler. NB: result list will contain both header and source files! :raises: :class:`NativeCompile.NativeCompileError` if there is an error processing the sources.
def defrag(plist): """defrag(plist) -> ([not fragmented], [defragmented], [ [bad fragments], [bad fragments], ... ])""" frags = defaultdict(PacketList) nofrag = PacketList() for p in plist: if IP not in p: nofrag.append(p) continue ip = p[IP] if ip.frag == 0 and ip.flags & 1 == 0: nofrag.append(p) continue uniq = (ip.id, ip.src, ip.dst, ip.proto) frags[uniq].append(p) defrag = [] missfrag = [] for lst in frags.values(): lst.sort(key=lambda x: x.frag) p = lst[0] lastp = lst[-1] if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing missfrag.append(lst) continue p = p.copy() if conf.padding_layer in p: del p[conf.padding_layer].underlayer.payload ip = p[IP] if ip.len is None or ip.ihl is None: clen = len(ip.payload) else: clen = ip.len - (ip.ihl << 2) txt = conf.raw_layer() for q in lst[1:]: if clen != q.frag << 3: # Wrong fragmentation offset if clen > q.frag << 3: warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag << 3, p, txt, q)) missfrag.append(lst) break if q[IP].len is None or q[IP].ihl is None: clen += len(q[IP].payload) else: clen += q[IP].len - (q[IP].ihl << 2) if conf.padding_layer in q: del q[conf.padding_layer].underlayer.payload txt.add_payload(q[IP].payload.copy()) else: ip.flags &= ~1 # !MF del ip.chksum del ip.len p = p / txt defrag.append(p) defrag2 = PacketList() for p in defrag: defrag2.append(p.__class__(bytes(p))) return nofrag, defrag2, missfrag
defrag(plist) -> ([not fragmented], [defragmented], [ [bad fragments], [bad fragments], ... ])
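A usage sketch, pairing `defrag` with scapy's `fragment()` to round-trip a packet (illustrative destination address from the TEST-NET range):

    from scapy.all import IP, UDP, fragment, defrag

    pkt = IP(dst="192.0.2.1") / UDP(sport=1234, dport=5678) / ("x" * 3000)
    frags = fragment(pkt, fragsize=500)      # split into ~500-byte fragments
    nofrag, defragged, bad = defrag(frags)
    assert len(defragged) == 1 and not bad   # one fully reassembled packet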
def _invoke_hook(hook_name, target): """ Generic hook invocation. """ try: for value in getattr(target, hook_name): func, args, kwargs = value func(target, *args, **kwargs) except AttributeError: # no hook defined pass except (TypeError, ValueError): # hook not properly defined (might be a mock) pass
Generic hook invocation.
def mavlink_packet(self, m): '''trigger sends from ATTITUDE packets''' if not self.have_home and m.get_type() == 'GPS_RAW_INT' and m.fix_type >= 3: gen_settings.home_lat = m.lat * 1.0e-7 gen_settings.home_lon = m.lon * 1.0e-7 self.have_home = True if self.pending_start: self.start() if m.get_type() != 'ATTITUDE': return t = self.get_time() dt = t - self.last_t if dt < 0 or dt > 10: self.last_t = t return if dt > 10 or dt < 0.9: return self.last_t = t for a in self.aircraft: if not gen_settings.stop: a.update(1.0) self.pkt_queue.append(a.pickled()) while len(self.pkt_queue) > len(self.aircraft)*2: self.pkt_queue.pop(0) if self.module('map') is not None and not self.menu_added_map: self.menu_added_map = True self.module('map').add_menu(self.menu)
trigger sends from ATTITUDE packets
def create(self, timeout=values.unset, priority=values.unset, task_channel=values.unset, workflow_sid=values.unset, attributes=values.unset): """ Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance """ data = values.of({ 'Timeout': timeout, 'Priority': priority, 'TaskChannel': task_channel, 'WorkflowSid': workflow_sid, 'Attributes': attributes, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TaskInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance