def polygonize(layer):
    """Polygonize a raster layer into a vector layer using GDAL.

    Issue https://github.com/inasafe/inasafe/issues/3183

    :param layer: The raster layer to polygonize.
    :type layer: QgsRasterLayer

    :return: Polygonized vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = polygonize_steps['output_layer_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']
    gdal_layer_name = polygonize_steps['gdal_layer_name']

    if layer.keywords.get('layer_purpose') == 'exposure':
        output_field = exposure_type_field
    else:
        output_field = hazard_value_field

    input_raster = gdal.Open(layer.source(), gdal.GA_ReadOnly)

    srs = osr.SpatialReference()
    srs.ImportFromWkt(input_raster.GetProjectionRef())

    temporary_dir = temp_dir(sub_dir='pre-process')
    out_shapefile = unique_filename(
        suffix='-%s.shp' % output_layer_name, dir=temporary_dir)

    driver = ogr.GetDriverByName("ESRI Shapefile")
    destination = driver.CreateDataSource(out_shapefile)

    output_layer = destination.CreateLayer(gdal_layer_name, srs)

    # We have no other way to use a shapefile. We need only the first 10 chars.
    field_name = output_field['field_name'][0:10]
    fd = ogr.FieldDefn(field_name, ogr.OFTInteger)
    output_layer.CreateField(fd)

    active_band = layer.keywords.get('active_band', 1)
    input_band = input_raster.GetRasterBand(active_band)
    # Fixme : add our own callback to Polygonize
    gdal.Polygonize(input_band, None, output_layer, 0, [], callback=None)
    destination.Destroy()

    vector_layer = QgsVectorLayer(out_shapefile, output_layer_name, 'ogr')

    # Let's remove polygons which were no data
    request = QgsFeatureRequest()
    expression = '"%s" = %s' % (field_name, no_data_value)
    request.setFilterExpression(expression)
    vector_layer.startEditing()
    for feature in vector_layer.getFeatures(request):
        vector_layer.deleteFeature(feature.id())
    vector_layer.commitChanges()

    # We transfer keywords to the output.
    vector_layer.keywords = layer.keywords.copy()
    vector_layer.keywords[
        layer_geometry['key']] = layer_geometry_polygon['key']
    vector_layer.keywords['title'] = output_layer_name
    # We just polygonized the raster layer. inasafe_fields do not exist.
    vector_layer.keywords['inasafe_fields'] = {
        output_field['key']: field_name
    }

    check_layer(vector_layer)
    return vector_layer
def _clamp_value(value, minimum, maximum):
    """
    Clamp a value to fit between a minimum and a maximum.

    * If ``value`` is between ``minimum`` and ``maximum``, return ``value``
    * If ``value`` is below ``minimum``, return ``minimum``
    * If ``value`` is above ``maximum``, return ``maximum``

    Args:
        value (float or int): The number to clamp
        minimum (float or int): The lowest allowed return value
        maximum (float or int): The highest allowed return value

    Returns:
        float or int: the clamped value

    Raises:
        ValueError: if maximum < minimum

    Example:
        >>> _clamp_value(3, 5, 10)
        5
        >>> _clamp_value(11, 5, 10)
        10
        >>> _clamp_value(8, 5, 10)
        8
    """
    if maximum < minimum:
        raise ValueError
    if value < minimum:
        return minimum
    elif value > maximum:
        return maximum
    else:
        return value
def task_collection_thread_handler(self, results_queue):
    """Main method for worker to run

    Pops a chunk of tasks off the collection of pending tasks to be added
    and submits them to be added.

    :param collections.deque results_queue: Queue for worker to output results to
    """
    # Add tasks until either we run out or we run into an unexpected error
    while self.tasks_to_add and not self.errors:
        max_tasks = self._max_tasks_per_request  # local copy
        chunk_tasks_to_add = []
        with self._pending_queue_lock:
            while len(chunk_tasks_to_add) < max_tasks and self.tasks_to_add:
                chunk_tasks_to_add.append(self.tasks_to_add.pop())
        if chunk_tasks_to_add:
            self._bulk_add_tasks(results_queue, chunk_tasks_to_add)
def read_shakemap(self, haz_sitecol, assetcol):
    """
    Enabled only if there is a shakemap_id parameter in the job.ini.
    Download, unzip, parse USGS shakemap files and build a corresponding
    set of GMFs which are then filtered with the hazard site collection
    and stored in the datastore.
    """
    oq = self.oqparam
    E = oq.number_of_ground_motion_fields
    oq.risk_imtls = oq.imtls or self.datastore.parent['oqparam'].imtls
    extra = self.riskmodel.get_extra_imts(oq.risk_imtls)
    if extra:
        logging.warning('There are risk functions for not available IMTs '
                        'which will be ignored: %s' % extra)

    logging.info('Getting/reducing shakemap')
    with self.monitor('getting/reducing shakemap'):
        smap = oq.shakemap_id if oq.shakemap_id else numpy.load(
            oq.inputs['shakemap'])
        sitecol, shakemap, discarded = get_sitecol_shakemap(
            smap, oq.imtls, haz_sitecol,
            oq.asset_hazard_distance['default'],
            oq.discard_assets)
        if len(discarded):
            self.datastore['discarded'] = discarded
        assetcol = assetcol.reduce_also(sitecol)

    logging.info('Building GMFs')
    with self.monitor('building/saving GMFs'):
        imts, gmfs = to_gmfs(
            shakemap, oq.spatial_correlation, oq.cross_correlation,
            oq.site_effects, oq.truncation_level, E, oq.random_seed,
            oq.imtls)
        save_gmf_data(self.datastore, sitecol, gmfs, imts)
    return sitecol, assetcol
def convert_out(self, obj):
    """Write EMIRUUID header on reduction"""
    newobj = super(ProcessedImageProduct, self).convert_out(obj)
    if newobj:
        hdulist = newobj.open()
        hdr = hdulist[0].header
        if 'EMIRUUID' not in hdr:
            hdr['EMIRUUID'] = str(uuid.uuid1())
    return newobj
def delete(self):
    """
    Delete the link and free the resources
    """
    if not self._created:
        return
    try:
        node1 = self._nodes[0]["node"]
        adapter_number1 = self._nodes[0]["adapter_number"]
        port_number1 = self._nodes[0]["port_number"]
    except IndexError:
        return
    try:
        yield from node1.delete(
            "/adapters/{adapter_number}/ports/{port_number}/nio".format(
                adapter_number=adapter_number1, port_number=port_number1),
            timeout=120)
    # If the node is already deleted (user selected multiple elements and
    # deleted them all at the same time)
    except aiohttp.web.HTTPNotFound:
        pass
    try:
        node2 = self._nodes[1]["node"]
        adapter_number2 = self._nodes[1]["adapter_number"]
        port_number2 = self._nodes[1]["port_number"]
    except IndexError:
        return
    try:
        yield from node2.delete(
            "/adapters/{adapter_number}/ports/{port_number}/nio".format(
                adapter_number=adapter_number2, port_number=port_number2),
            timeout=120)
    # If the node is already deleted (user selected multiple elements and
    # deleted them all at the same time)
    except aiohttp.web.HTTPNotFound:
        pass
    yield from super().delete()
def _action_allowed(self, action):
    """
    participation actions can be disabled on layer level,
    or disabled on a per node basis
    """
    if getattr(self.layer.participation_settings,
               '{0}_allowed'.format(action)) is False:
        return False
    else:
        return getattr(self.participation_settings,
                       '{0}_allowed'.format(action))
def _expr2bddnode(expr):
    """Convert an expression into a BDD node."""
    if expr.is_zero():
        return BDDNODEZERO
    elif expr.is_one():
        return BDDNODEONE
    else:
        top = expr.top

        # Register this variable
        _ = bddvar(top.names, top.indices)
        root = top.uniqid

        # Recurse on the Shannon expansion of the top variable
        lo = _expr2bddnode(expr.restrict({top: 0}))
        hi = _expr2bddnode(expr.restrict({top: 1}))
        return _bddnode(root, lo, hi)
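This private helper recurses on the Shannon expansion f = x'·f(x=0) + x·f(x=1) of the top variable. A minimal usage sketch of the corresponding public conversion, assuming the pyeda package (where this helper appears to originate) is installed:

from pyeda.inter import exprvar, expr2bdd

# Build a boolean expression and convert it to a BDD; expr2bdd
# internally walks the same recursive Shannon expansion as above.
a = exprvar('a')
b = exprvar('b')
f = a & ~b | ~a & b     # XOR as an expression
bdd = expr2bdd(f)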
def get_select_sql(self):
    """
    Calculate the difference between this record's value and the
    lag/lead record's value
    """
    return '(({0}) - ({1}({2}){3}))'.format(
        self.field.get_select_sql(),
        self.name.upper(),
        self.get_field_identifier(),
        self.get_over(),
    )
def tiered_alignment(in_bam, tier_num, multi_mappers, extra_args,
                     genome_build, pair_stats,
                     work_dir, dirs, config):
    """Perform the alignment of non-mapped reads from previous tier.
    """
    nomap_fq1, nomap_fq2 = select_unaligned_read_pairs(
        in_bam, "tier{}".format(tier_num), work_dir, config)
    if nomap_fq1 is not None:
        base_name = "{}-tier{}out".format(
            os.path.splitext(os.path.basename(in_bam))[0], tier_num)
        config = copy.deepcopy(config)
        dirs = copy.deepcopy(dirs)
        config["algorithm"]["bam_sort"] = "queryname"
        config["algorithm"]["multiple_mappers"] = multi_mappers
        config["algorithm"]["extra_align_args"] = [
            "-i", int(pair_stats["mean"]), int(pair_stats["std"])
        ] + extra_args
        out_bam, ref_file = align_to_sort_bam(
            nomap_fq1, nomap_fq2,
            lane.rg_names(base_name, base_name, config),
            genome_build, "novoalign",
            dirs, config,
            dir_ext=os.path.join("hydra", os.path.split(nomap_fq1)[0]))
        return out_bam
    else:
        return None
def put(self, storagemodel: object, modeldefinition=None) -> StorageQueueModel:
    """ insert queue message into storage """
    try:
        message = modeldefinition['queueservice'].put_message(
            storagemodel._queuename, storagemodel.getmessage())
        storagemodel.mergemessage(message)
    except Exception as e:
        # Build the message before raising; the original cleared
        # storagemodel to None first and then referenced its attributes,
        # and its finally-return silently swallowed this exception.
        msg = 'can not save queue message: queue {} with message {} because {!s}'.format(
            storagemodel._queuename, storagemodel.content, e)
        raise AzureStorageWrapException(msg=msg)
    return storagemodel
def filter_accept_reftrack(self, reftrack):
    """Return True, if the filter accepts the given reftrack

    :param reftrack: the reftrack to filter
    :type reftrack: :class:`jukeboxcore.reftrack.Reftrack`
    :returns: True, if the filter accepts the reftrack
    :rtype: :class:`bool`
    :raises: None
    """
    if reftrack.status() in self._forbidden_status:
        return False
    if reftrack.get_typ() in self._forbidden_types:
        return False
    if reftrack.uptodate() in self._forbidden_uptodate:
        return False
    if reftrack.alien() in self._forbidden_alien:
        return False
    return True
def p_for_sentence_start(p):
    """ for_start : FOR ID EQ expr TO expr step
    """
    gl.LOOPS.append(('FOR', p[2]))
    p[0] = None

    if p[4] is None or p[6] is None or p[7] is None:
        return

    if is_number(p[4], p[6], p[7]):
        if p[4].value != p[6].value and p[7].value == 0:
            warning(p.lineno(5), 'STEP value is 0 and FOR might loop forever')

        if p[4].value > p[6].value and p[7].value > 0:
            warning(p.lineno(5), 'FOR start value is greater than end. This FOR loop is useless')

        if p[4].value < p[6].value and p[7].value < 0:
            warning(p.lineno(2), 'FOR start value is lower than end. This FOR loop is useless')

    id_type = common_type(common_type(p[4], p[6]), p[7])
    variable = SYMBOL_TABLE.access_var(p[2], p.lineno(2), default_type=id_type)
    if variable is None:
        return

    variable.accessed = True
    expr1 = make_typecast(variable.type_, p[4], p.lineno(3))
    expr2 = make_typecast(variable.type_, p[6], p.lineno(5))
    expr3 = make_typecast(variable.type_, p[7], p.lexer.lineno)

    p[0] = make_sentence('FOR', variable, expr1, expr2, expr3)
def get_name_type_dict(self):
    """
    Returns a dictionary of the type {'column_name': data_type, ...}

    :return: dict
    """
    attrs = self.get_attributes()
    types = self.get_types()
    d = dict()
    for i, a in enumerate(attrs):
        d[a] = types[i]
    return d
def _make_txn_selector(self):
    """Helper for :meth:`read`."""
    if self._transaction_id is not None:
        return TransactionSelector(id=self._transaction_id)

    if self._read_timestamp:
        key = "read_timestamp"
        value = _datetime_to_pb_timestamp(self._read_timestamp)
    elif self._min_read_timestamp:
        key = "min_read_timestamp"
        value = _datetime_to_pb_timestamp(self._min_read_timestamp)
    elif self._max_staleness:
        key = "max_staleness"
        value = _timedelta_to_duration_pb(self._max_staleness)
    elif self._exact_staleness:
        key = "exact_staleness"
        value = _timedelta_to_duration_pb(self._exact_staleness)
    else:
        key = "strong"
        value = True

    options = TransactionOptions(
        read_only=TransactionOptions.ReadOnly(**{key: value})
    )

    if self._multi_use:
        return TransactionSelector(begin=options)
    else:
        return TransactionSelector(single_use=options)
async def remove(self, *instances, using_db=None) -> None:
    """
    Removes one or more of ``instances`` from the relation.
    """
    db = using_db if using_db else self.model._meta.db
    if not instances:
        raise OperationalError("remove() called on no instances")
    through_table = Table(self.field.through)
    if len(instances) == 1:
        condition = (
            getattr(through_table, self.field.forward_key) == instances[0].id
        ) & (
            getattr(through_table, self.field.backward_key) == self.instance.id
        )
    else:
        condition = (
            getattr(through_table, self.field.backward_key) == self.instance.id
        ) & (
            getattr(through_table, self.field.forward_key).isin(
                [i.id for i in instances])
        )
    query = db.query_class.from_(through_table).where(condition).delete()
    await db.execute_query(str(query))
def list_engines_by_priority(engines=None):
    """Return a list of supported engines, sorted by priority."""
    if engines is None:
        engines = ENGINES
    return sorted(engines, key=operator.methodcaller("priority"))
def platform_cache_dir():
    """
    Returns a directory which should be writable for any application.
    This should be used for temporary deletable data.
    """
    if WIN32:  # nocover
        dpath_ = '~/AppData/Local'
    elif LINUX:  # nocover
        dpath_ = '~/.cache'
    elif DARWIN:  # nocover
        dpath_ = '~/Library/Caches'
    else:  # nocover
        raise NotImplementedError('Unknown Platform %r' % (sys.platform,))
    dpath = normpath(expanduser(dpath_))
    return dpath
def _spinboxValueChanged(self, index, spinBox=None):
    """ Is called when a spin box value was changed.

    Updates the spin boxes and sets other combo boxes having the same
    index to the fake dimension of length 1.
    """
    if spinBox is None:
        spinBox = self.sender()
    assert spinBox, "spinBox not defined and not the sender"

    logger.debug("{} sigContentsChanged signal (spinBox)"
                 .format("Blocked" if self.signalsBlocked() else "Emitting"))
    self.sigContentsChanged.emit(UpdateReason.COLLECTOR_SPIN_BOX)
def creep_data(data_set='creep_rupture'):
    """Brun and Yoshida's metal creep rupture data."""
    if not data_available(data_set):
        download_data(data_set)
    path = os.path.join(data_path, data_set)
    tar_file = os.path.join(path, 'creeprupt.tar')
    tar = tarfile.open(tar_file)
    print('Extracting file.')
    tar.extractall(path=path)
    tar.close()
    all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
    y = all_data[:, 1:2].copy()
    features = [0]
    features.extend(range(2, 31))
    X = all_data[:, features].copy()
    return data_details_return({'X': X, 'y': y}, data_set)
def consume_token(self, tokens, index, tokens_len):
    """Consume a token.

    Returns tuple of (tokens, tokens_len, index) when consumption is
    completed and tokens have been merged together.
    """
    del tokens_len

    consumption_ended = False
    q_type = self.quote_type
    begin_literal_type = getattr(TokenType,
                                 "Begin{0}QuotedLiteral".format(q_type))
    end_literal_type = getattr(TokenType,
                               "End{0}QuotedLiteral".format(q_type))

    if (index != self.begin and
            tokens[index].type == begin_literal_type):
        # This is an edge case where a quote begins a line and matched
        # as a quoted region beginning and a quoted region ending.
        # Split the token before and after the quote, mark the
        # quote character itself as an ending and insert both
        # tokens back in, handling the ending afterwards.
        assert _RE_QUOTE_TYPE.match(tokens[index].content[0])

        # Mini-tokenize everything after the first token
        line_tokens = _scan_for_tokens(tokens[index].content[1:])
        end_type = getattr(TokenType, "End{0}QuotedLiteral".format(q_type))
        replacement = [Token(type=end_type,
                             content=tokens[index].content[0],
                             line=tokens[index].line,
                             col=tokens[index].col)]

        for after in line_tokens:
            replacement.append(Token(type=after.type,
                                     content=after.content,
                                     line=(tokens[index].line + after.line - 1),
                                     col=(tokens[index].col + after.col - 1)))

        tokens = _replace_token_range(tokens, index, index + 1, replacement)
        consumption_ended = True

    if tokens[index].type == end_literal_type:
        consumption_ended = True

    if consumption_ended:
        end = index + 1
        pasted = ""
        for i in range(self.begin, end):
            pasted += tokens[i].content

        tokens = _replace_token_range(tokens, self.begin, end,
                                      [Token(type=TokenType.QuotedLiteral,
                                             content=pasted,
                                             line=tokens[self.begin].line,
                                             col=tokens[self.begin].col)])

        return (self.begin, len(tokens), tokens)
def train(self):
    """Train the network using the training dataset.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    self.stamp_start = time.time()
    for iteration, batch in tqdm.tqdm(enumerate(self.iter_train),
                                      desc='train', total=self.max_iter,
                                      ncols=80):
        self.epoch = self.iter_train.epoch
        self.iteration = iteration

        ############
        # validate #
        ############

        if self.interval_validate and \
                self.iteration % self.interval_validate == 0:
            self.validate()

        #########
        # train #
        #########

        batch = map(datasets.transform_lsvrc2012_vgg16, batch)
        in_vars = utils.batch_to_vars(batch, device=self.device)
        self.model.zerograds()
        loss = self.model(*in_vars)

        if loss is not None:
            loss.backward()
            self.optimizer.update()

            lbl_true = zip(*batch)[1]
            lbl_pred = chainer.functions.argmax(self.model.score, axis=1)
            lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
            acc = utils.label_accuracy_score(
                lbl_true, lbl_pred, self.model.n_class)
            self._write_log(**{
                'epoch': self.epoch,
                'iteration': self.iteration,
                'elapsed_time': time.time() - self.stamp_start,
                'train/loss': float(loss.data),
                'train/acc': acc[0],
                'train/acc_cls': acc[1],
                'train/mean_iu': acc[2],
                'train/fwavacc': acc[3],
            })

        if iteration >= self.max_iter:
            self._save_model()
            break
def evalrepr(self):
    """Evaluable repr"""
    if self.is_model():
        return self.get_fullname()
    else:
        return self.parent.evalrepr + "." + self.name
def _put_key(file_path, dest_key=None, overwrite=True):
    """
    Upload given file into DKV and save it under the given key as raw object.

    :param file_path: path to file to upload
    :param dest_key: name of destination key in DKV
    :return: key name if object was uploaded successfully
    """
    ret = api("POST /3/PutKey?destination_key={}&overwrite={}".format(
        dest_key if dest_key else '', overwrite),
        filename=file_path)
    return ret["destination_key"]
def copy(self):
    """
    Safely get a copy of the current mesh.

    Copied objects will have emptied caches to avoid memory
    issues and so may be slow on initial operations until
    caches are regenerated.

    Current object will *not* have its cache cleared.

    Returns
    ---------
    copied : trimesh.Trimesh
      Copy of current mesh
    """
    copied = Trimesh()

    # copy vertex and face data
    copied._data.data = copy.deepcopy(self._data.data)
    # copy visual information
    copied.visual = self.visual.copy()
    # get metadata
    copied.metadata = copy.deepcopy(self.metadata)
    # get center_mass and density
    if self._center_mass is not None:
        copied.center_mass = self.center_mass
    copied._density = self._density

    # make sure cache is set from here
    copied._cache.clear()

    return copied
def update(self):
    """Update |C2| based on :math:`c_2 = 1.-c_1-c_3`.

    Examples:
        The following examples show that the calculated values of |C2|
        are clipped when too low or too high:

        >>> from hydpy.models.hstream import *
        >>> parameterstep('1d')
        >>> derived.c1 = 0.6
        >>> derived.c3 = 0.1
        >>> derived.c2.update()
        >>> derived.c2
        c2(0.3)
        >>> derived.c1 = 1.6
        >>> derived.c2.update()
        >>> derived.c2
        c2(0.0)
        >>> derived.c1 = -1.6
        >>> derived.c2.update()
        >>> derived.c2
        c2(1.0)
    """
    der = self.subpars
    self(numpy.clip(1. - der.c1 - der.c3, 0., 1.))
def info():
    """Generate information for a bug report."""
    try:
        platform_info = {
            'system': platform.system(),
            'release': platform.release(),
        }
    except IOError:
        platform_info = {
            'system': 'Unknown',
            'release': 'Unknown',
        }

    implementation_info = _implementation()
    urllib3_info = {'version': urllib3.__version__}
    chardet_info = {'version': chardet.__version__}

    pyopenssl_info = {
        'version': None,
        'openssl_version': '',
    }
    if OpenSSL:
        pyopenssl_info = {
            'version': OpenSSL.__version__,
            'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
        }
    cryptography_info = {
        'version': getattr(cryptography, '__version__', ''),
    }
    idna_info = {
        'version': getattr(idna, '__version__', ''),
    }

    system_ssl = ssl.OPENSSL_VERSION_NUMBER
    system_ssl_info = {
        'version': '%x' % system_ssl if system_ssl is not None else ''
    }

    return {
        'platform': platform_info,
        'implementation': implementation_info,
        'system_ssl': system_ssl_info,
        'using_pyopenssl': pyopenssl is not None,
        'pyOpenSSL': pyopenssl_info,
        'urllib3': urllib3_info,
        'chardet': chardet_info,
        'cryptography': cryptography_info,
        'idna': idna_info,
        'requests': {
            'version': requests_version,
        },
    }
def alias_composition(self, composition_id, alias_id):
    """Adds an ``Id`` to a ``Composition`` for the purpose of creating
    compatibility.

    The primary ``Id`` of the ``Composition`` is determined by the
    provider. The new ``Id`` is an alias to the primary ``Id``. If the
    alias is a pointer to another composition, it is reassigned to the
    given composition ``Id``.

    arg:    composition_id (osid.id.Id): the ``Id`` of a ``Composition``
    arg:    alias_id (osid.id.Id): the alias ``Id``
    raise:  AlreadyExists - ``alias_id`` is in use as a primary ``Id``
    raise:  NotFound - ``composition_id`` not found
    raise:  NullArgument - ``composition_id`` or ``alias_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.alias_resources_template
    self._alias_id(primary_id=composition_id, equivalent_id=alias_id)
def parse_xml_node(self, node):
    '''Parse an xml.dom Node object representing a condition into this
    object.
    '''
    self.sequence = int(node.getAttributeNS(RTS_NS, 'sequence'))
    c = node.getElementsByTagNameNS(RTS_NS, 'TargetComponent')
    if c.length != 1:
        raise InvalidParticipantNodeError
    self.target_component = TargetExecutionContext().parse_xml_node(c[0])
    for c in get_direct_child_elements_xml(node, prefix=RTS_EXT_NS,
                                           local_name='Properties'):
        name, value = parse_properties_xml(c)
        self._properties[name] = value
    return self
def average(numbers, numtype='float'):
    """
    Calculates the average or mean of a list of numbers

    Args:
        numbers: a list of integers or floating point numbers.

        numtype: string, 'decimal' or 'float'; the type of number to return.

    Returns:
        The average (mean) of the numbers as a floating point number
        or a Decimal object.

    Requires:
        The math module
    """
    if numtype == 'decimal':
        return Decimal(sum(numbers)) / len(numbers)
    else:
        return float(sum(numbers)) / len(numbers)
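A short usage sketch; note the original compared the builtin `type` instead of `numtype`, which made the Decimal branch unreachable until the fix above. Assumes `from decimal import Decimal` at module level:

from decimal import Decimal

print(average([1, 2, 3, 4]))           # 2.5 as a float
print(average([1, 2, 3], 'decimal'))   # Decimal('2'), exact arithmetic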
def find(self, name):
    """Returns a dict of collector's details if found

    Args:
        name (str): name of collector searching for
    """
    collectors = self.get_collectors()
    for collector in collectors:
        if name.lower() == collector['name'].lower():
            self.collector_id = collector['id']
            return collector
    return {'status': 'No results found.'}
def run_command(self, codeobj):
    """Execute a compiled code object, and write the output back to the
    client."""
    try:
        value, stdout = yield from self.attempt_exec(codeobj, self.namespace)
    except Exception:
        yield from self.send_exception()
        return
    else:
        yield from self.send_output(value, stdout)
def clients(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Keyword Parameters:

    protocol
        Ignored.

    groups
        The groups (types) to which the clients belong, either from
        ('Genuine', 'Impostor'). Note that 'eval' is an alias for 'Genuine'.
        If no groups are specified, then both clients and impostors are listed.

    Returns: A list containing all the clients which have the given properties.
    """
    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group",
                                                self.client_types())

    # List of the clients
    q = self.query(Client)
    if groups:
        q = q.filter(Client.stype.in_(groups))
    q = q.order_by(Client.id)
    return list(q)
def play_sync(self):
    """
    Play the video and block whilst the video is playing
    """
    self.play()
    logger.info("Playing synchronously")
    try:
        time.sleep(0.05)
        logger.debug("Wait for playing to start")
        while self.is_playing():
            time.sleep(0.05)
    except DBusException:
        logger.error(
            "Cannot play synchronously any longer as DBus calls timed out."
        )
def err_exit(msg, rc=1):
    """Print msg to stderr and exit with rc.
    """
    print(msg, file=sys.stderr)
    sys.exit(rc)
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    first = datetime.datetime(year, month, 1, hour, minute)

    # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style
    # (0-6), because 7 % 7 = 0
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
    wd = weekdayone + ((whichweek - 1) * ONEWEEK)
    if (wd.month != month):
        wd -= ONEWEEK

    return wd
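A quick check of the week arithmetic, assuming the module-level constant `ONEWEEK = datetime.timedelta(days=7)` (standard in the dateutil code this appears to come from). The second Sunday of March 2021, when US DST began, falls on March 14:

import datetime

ONEWEEK = datetime.timedelta(days=7)  # assumed module-level constant

# dayofweek=0 (Sunday), whichweek=2 -> second Sunday of March 2021
print(picknthweekday(2021, 3, 0, 2, 0, 2))  # 2021-03-14 02:00:00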
def most_visited_venues_card(num=10):
    """
    Displays a card showing the Venues that have the most Events.

    In spectator_core tags, rather than spectator_events, so it can still be
    used on core pages, even if spectator_events isn't installed.
    """
    if spectator_apps.is_enabled('events'):
        object_list = most_visited_venues(num=num)
        object_list = chartify(object_list, 'num_visits', cutoff=1)

        return {
            'card_title': 'Most visited venues',
            'score_attr': 'num_visits',
            'object_list': object_list,
        }
def items(self) -> Iterable[Tuple[str, Any]]:
    """An iterable of (name, value) pairs.

    .. versionadded:: 3.1
    """
    return [(opt.name, opt.value()) for name, opt in self._options.items()]
def restore(self, state):
    """Restore a previous state of this stream walker.

    Raises:
        ArgumentError: If the state refers to a different selector or the
            offset is invalid.
    """
    selector = DataStreamSelector.FromString(state.get(u'selector'))
    if selector != self.selector:
        raise ArgumentError(
            "Attempted to restore a BufferedStreamWalker with a different selector",
            selector=self.selector, serialized_data=state)

    self.seek(state.get(u'offset'), target="offset")
def get_default_config(self):
    """
    Returns the default collector settings
    """
    config = super(NumaCollector, self).get_default_config()
    config.update({
        'path': 'numa',
        'bin': self.find_binary('numactl'),
    })
    return config
def _set_mode(self, discover_mode, connect_mode):
    """Set the mode of the BLED112, used to enable and disable advertising

    To enable advertising, use 4, 2.
    To disable advertising use 0, 0.

    Args:
        discover_mode (int): The discoverability mode, 0 for off,
            4 for on (user data)
        connect_mode (int): The connectability mode, 0 for off,
            2 for undirected connectable
    """
    payload = struct.pack("<BB", discover_mode, connect_mode)
    response = self._send_command(6, 1, payload)

    result, = unpack("<H", response.payload)
    if result != 0:
        return False, {'reason': 'Error code from BLED112 setting mode',
                       'code': result}

    return True, None
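For reference, the two mode values are packed little-endian as unsigned chars, so enabling advertising produces a two-byte payload:

import struct

payload = struct.pack("<BB", 4, 2)   # discover_mode=4, connect_mode=2
assert payload == b'\x04\x02'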
def iter_package_families(paths=None):
    """Iterate over package families, in no particular order.

    Note that multiple package families with the same name can be returned.
    Unlike packages, families later in the searchpath are not hidden by
    earlier families.

    Args:
        paths (list of str, optional): paths to search for package families,
            defaults to `config.packages_path`.

    Returns:
        `PackageFamily` iterator.
    """
    for path in (paths or config.packages_path):
        repo = package_repository_manager.get_repository(path)
        for resource in repo.iter_package_families():
            yield PackageFamily(resource)
def close(self):
    '''close the Mission Editor window'''
    self.time_to_quit = True
    self.close_window.release()
    if self.child.is_alive():
        self.child.join(1)
    self.child.terminate()
    self.mavlink_message_queue_handler.join()
    self.event_queue_lock.acquire()
    self.event_queue.put(MissionEditorEvent(me_event.MEE_TIME_TO_QUIT))
    self.event_queue_lock.release()
def send_command(self, *args, **kwargs):
    """Send command to network device; retrieve output until router_prompt
    or expect_string.

    By default this method will keep waiting to receive data until the
    network device prompt is detected. The current network device prompt
    will be determined automatically.

    command_string = command to execute
    expect_string = pattern to search for uses re.search (use raw strings)
    delay_factor = decrease the initial delay before we start looking for data
    max_loops = number of iterations before we give up and raise an exception
    strip_prompt = strip the trailing prompt from the output
    strip_command = strip the leading command from the output
    """
    if len(args) >= 2:
        expect_string = args[1]
    else:
        expect_string = kwargs.get("expect_string")
        if expect_string is None:
            expect_string = r"(OK|ERROR|Command not recognized\.)"
            expect_string = self.RETURN + expect_string + self.RETURN
            kwargs.setdefault("expect_string", expect_string)
    output = super(CiscoSSHConnection, self).send_command(*args, **kwargs)
    return output
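A hedged usage sketch of the standard Netmiko call pattern this override feeds into; the host and credentials are placeholders, not from the original source:

from netmiko import ConnectHandler

conn = ConnectHandler(device_type='cisco_ios', host='192.0.2.1',
                      username='admin', password='secret')
# Uses the automatically detected prompt unless expect_string is given.
print(conn.send_command('show version'))
print(conn.send_command('xCommand Standby Activate',
                        expect_string=r'OK|ERROR'))
conn.disconnect()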
def build_listen(self, listen_node):
    """parse `listen` sections, and return a config.Listen

    Args:
        listen_node (TreeNode): Description

    Returns:
        config.Listen: an object
    """
    proxy_name = listen_node.listen_header.proxy_name.text
    service_address_node = listen_node.listen_header.service_address

    # parse the config block
    config_block_lines = self.__build_config_block(
        listen_node.config_block)

    # parse host and port
    host, port = '', ''
    if isinstance(service_address_node, pegnode.ServiceAddress):
        host = service_address_node.host.text
        port = service_address_node.port.text
    else:
        # use `bind` in config lines to fill in host and port;
        # just use the first
        for line in config_block_lines:
            if isinstance(line, config.Bind):
                host, port = line.host, line.port
                break
        else:
            raise Exception(
                'Not specify host and port in `listen` definition')
    return config.Listen(
        name=proxy_name, host=host, port=port,
        config_block=config_block_lines)
def _get_bgp_route_attr(self, destination, vrf, next_hop, ip_version=4):
    """
    BGP protocol attributes for get_route_to

    Only IPv4 supported
    """
    CMD_SHIBNV = 'show ip bgp neighbors vrf {vrf} | include "is {neigh}"'

    search_re_dict = {
        "aspath": {
            "re": r"AS-Path: ([\d\(\)]([\d\(\) ])*)",
            "group": 1,
            "default": "",
        },
        "bgpnh": {
            "re": r"[^|\\n][ ]{4}(" + IP_ADDR_REGEX + r")",
            "group": 1,
            "default": "",
        },
        "bgpfrom": {
            "re": r"from (" + IP_ADDR_REGEX + r")",
            "group": 1,
            "default": "",
        },
        "bgpcomm": {
            "re": r" Community: ([\w\d\-\: ]+)",
            "group": 1,
            "default": "",
        },
        "bgplp": {"re": r"localpref (\d+)", "group": 1, "default": ""},
        # external, internal, redist
        "bgpie": {"re": r"^: (\w+),", "group": 1, "default": ""},
        "vrfimp": {
            "re": r"Imported from [\S]+ \(VRF (\S+)\)",
            "group": 1,
            "default": "",
        },
    }

    bgp_attr = {}
    # get BGP AS number
    outbgp = self._send_command('show bgp process | include "BGP Protocol Tag"')
    matchbgpattr = RE_BGP_PROTO_TAG.match(outbgp)
    if not matchbgpattr:
        return bgp_attr
    bgpas = matchbgpattr.group(1)
    if ip_version == 4:
        bgpcmd = "show ip bgp vrf {vrf} {destination}".format(
            vrf=vrf, destination=destination
        )
        outbgp = self._send_command(bgpcmd)

        outbgpsec = outbgp.split("Path type")
        # this should not happen (zero BGP paths)...
        if len(outbgpsec) == 1:
            return bgp_attr

        # process all bgp paths
        for bgppath in outbgpsec[1:]:
            if "is best path" not in bgppath:
                # only best path is added to protocol attributes
                continue
            # find BGP attributes
            for key in search_re_dict:
                matchre = re.search(search_re_dict[key]["re"], bgppath)
                if matchre:
                    groupnr = int(search_re_dict[key]["group"])
                    search_re_dict[key]["result"] = matchre.group(groupnr)
                else:
                    search_re_dict[key]["result"] = \
                        search_re_dict[key]["default"]

            bgpnh = search_re_dict["bgpnh"]["result"]
            # if route is not leaked, next hops have to match
            if (not (search_re_dict["bgpie"]["result"]
                     in ["redist", "local"])) and (bgpnh != next_hop):
                # this is not the right route
                continue

            # find remote AS nr. of this neighbor
            bgpcmd = CMD_SHIBNV.format(vrf=vrf, neigh=bgpnh)
            outbgpnei = self._send_command(bgpcmd)
            matchbgpras = RE_BGP_REMOTE_AS.search(outbgpnei)
            if matchbgpras:
                bgpras = matchbgpras.group(1)
            else:
                # next-hop is not known in this vrf, route leaked from
                # other vrf or from vpnv4 table?
                # get remote AS nr. from as-path if it is an ebgp neighbor;
                # if locally sourced, remote AS is undefined
                bgpie = search_re_dict["bgpie"]["result"]
                if bgpie == "external":
                    bgpras = bgpie.split(" ")[0].replace("(", "")
                elif bgpie == "internal":
                    bgpras = bgpas
                else:  # redist, local
                    bgpras = ""
            # community
            bothcomm = []
            extcomm = []
            stdcomm = search_re_dict["bgpcomm"]["result"].split()
            commsplit = bgppath.split("Extcommunity:")
            if len(commsplit) == 2:
                for line in commsplit[1].split("\n")[1:]:
                    # RT:65004:22
                    matchcommun = RE_BGP_COMMUN.match(line)
                    if matchcommun:
                        extcomm.append(matchcommun.group(1))
                    else:
                        # we've reached the end of the extended
                        # community section
                        break
            bothcomm = stdcomm + extcomm
            bgp_attr = {
                "as_path": search_re_dict["aspath"]["result"].strip(),
                "remote_address": search_re_dict["bgpfrom"]["result"],
                "local_preference": int(search_re_dict["bgplp"]["result"]),
                "communities": bothcomm,
                "local_as": helpers.as_number(bgpas),
            }
            if bgpras:
                bgp_attr["remote_as"] = helpers.as_number(bgpras)
            else:
                bgp_attr["remote_as"] = 0  # 0?, locally sourced
    return bgp_attr
def register(linter):
    """required method to auto register this checker """
    linter.register_checker(TypeChecker(linter))
    linter.register_checker(IterableChecker(linter))
def modify(
        login, password=None, password_hashed=False,
        domain=None, profile=None, script=None,
        drive=None, homedir=None, fullname=None,
        account_desc=None, account_control=None,
        machine_sid=None, user_sid=None,
        reset_login_hours=False, reset_bad_password_count=False):
    '''
    Modify user account

    login : string
        login name
    password : string
        password
    password_hashed : boolean
        set if password is a nt hash instead of plain text
    domain : string
        users domain
    profile : string
        profile path
    script : string
        logon script
    drive : string
        home drive
    homedir : string
        home directory
    fullname : string
        full name
    account_desc : string
        account description
    machine_sid : string
        specify the machines new primary group SID or rid
    user_sid : string
        specify the users new primary group SID or rid
    account_control : string
        specify user account control properties

        .. note::
            Only the following can be set:
            - N: No password required
            - D: Account disabled
            - H: Home directory required
            - L: Automatic Locking
            - X: Password does not expire
    reset_login_hours : boolean
        reset the users allowed logon hours
    reset_bad_password_count : boolean
        reset the stored bad login counter

    .. note::
        if user is absent and password is provided, the user will be created

    CLI Example:

    .. code-block:: bash

        salt '*' pdbedit.modify inara fullname='Inara Serra'
        salt '*' pdbedit.modify simon password=r1v3r
        salt '*' pdbedit.modify jane drive='V:' homedir='\\\\serenity\\jane\\profile'
        salt '*' pdbedit.modify mal account_control=NX
    '''
    ret = 'unchanged'

    # flag mapping
    flags = {
        'domain': '--domain=',
        'full name': '--fullname=',
        'account desc': '--account-desc=',
        'home directory': '--homedir=',
        'homedir drive': '--drive=',
        'profile path': '--profile=',
        'logon script': '--script=',
        'account flags': '--account-control=',
        'user sid': '-U ',
        'machine sid': '-M ',
    }

    # field mapping
    provided = {
        'domain': domain,
        'full name': fullname,
        'account desc': account_desc,
        'home directory': homedir,
        'homedir drive': drive,
        'profile path': profile,
        'logon script': script,
        'account flags': account_control,
        'user sid': user_sid,
        'machine sid': machine_sid,
    }

    # update password
    if password:
        ret = create(login, password, password_hashed)[login]
        if ret not in ['updated', 'created', 'unchanged']:
            return {login: ret}
    elif login not in list_users(False):
        return {login: 'absent'}

    # check for changes
    current = get_user(login, hashes=True)
    changes = {}
    for key, val in provided.items():
        if key in ['user sid', 'machine sid']:
            if val is not None and key in current and \
                    not current[key].endswith(six.text_type(val)):
                changes[key] = six.text_type(val)
        elif key in ['account flags']:
            if val is not None:
                if val.startswith('['):
                    val = val[1:-1]
                new = []
                for f in val.upper():
                    if f not in ['N', 'D', 'H', 'L', 'X']:
                        logmsg = 'pdbedit.modify - unknown {} flag for account_control, ignored'.format(f)
                        log.warning(logmsg)
                    else:
                        new.append(f)
                changes[key] = "[{flags}]".format(flags="".join(new))
        else:
            if val is not None and key in current and current[key] != val:
                changes[key] = val

    # apply changes
    if changes or reset_login_hours or reset_bad_password_count:
        cmds = []
        for change in changes:
            cmds.append('{flag}{value}'.format(
                flag=flags[change],
                value=_quote_args(changes[change]),
            ))
        if reset_login_hours:
            cmds.append('--logon-hours-reset')
        if reset_bad_password_count:
            cmds.append('--bad-password-count-reset')

        res = __salt__['cmd.run_all'](
            'pdbedit --modify --user {login} {changes}'.format(
                login=_quote_args(login),
                changes=" ".join(cmds),
            ),
        )

        if res['retcode'] > 0:
            return {login: res['stderr'] if 'stderr' in res else res['stdout']}

        if ret != 'created':
            ret = 'updated'

    return {login: ret}
def advance_for_next_slice(self, recovery_slice=False):
    """Advance self for next slice.

    Args:
      recovery_slice: True if this slice is running recovery logic.
        See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
        for more info.
    """
    self.slice_start_time = None
    self.slice_request_id = None
    self.slice_retries = 0
    self.acquired_once = False
    if recovery_slice:
        self.slice_id += 2
    else:
        self.slice_id += 1
def Hide(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:
    """
    Call native `ShowWindow(SW.Hide)`.
    waitTime: float
    Return bool, True if succeed otherwise False.
    """
    return self.ShowWindow(SW.Hide, waitTime)
def delete_datapoints_in_time_range(self, start_dt=None, end_dt=None):
    """Delete datapoints from this stream between the provided start and end times

    If neither a start or end time is specified, all data points in the
    stream will be deleted.

    :param start_dt: The datetime after which data points should be deleted
        or None if all data points from the beginning of time should be deleted.
    :param end_dt: The datetime before which data points should be deleted
        or None if all data points until the current time should be deleted.
    :raises devicecloud.DeviceCloudHttpException: in the case of an
        unexpected http error
    """
    start_dt = to_none_or_dt(validate_type(start_dt, datetime.datetime, type(None)))
    end_dt = to_none_or_dt(validate_type(end_dt, datetime.datetime, type(None)))

    params = {}
    if start_dt is not None:
        params['startTime'] = isoformat(start_dt)
    if end_dt is not None:
        params['endTime'] = isoformat(end_dt)

    self._conn.delete("/ws/DataPoint/{stream_id}{querystring}".format(
        stream_id=self.get_stream_id(),
        querystring="?" + urllib.parse.urlencode(params) if params else "",
    ))
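A hedged usage sketch, assuming the python-devicecloud library's `DeviceCloud` client; the credentials and the stream name 'temperature' are placeholders:

import datetime
from devicecloud import DeviceCloud

dc = DeviceCloud('user', 'pass')
stream = dc.streams.get_stream("temperature")

# Drop everything recorded before the start of 2015.
stream.delete_datapoints_in_time_range(
    end_dt=datetime.datetime(2015, 1, 1))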
def _update_doc(self, func_doc):
    """Update the docstring by merging and reformatting the original one:
    the first line becomes the deprecation notice ("Deprecated: tip_info"),
    followed by the original func_doc."""
    deprecated_doc = "Deprecated"
    if self.tip_info:
        deprecated_doc = "{}: {}".format(deprecated_doc, self.tip_info)
    if func_doc:
        func_doc = "{}\n{}".format(deprecated_doc, func_doc)
    return func_doc
def push(cpu, value, size):
    """
    Writes a value in the stack.

    :param value: the value to put in the stack.
    :param size: the size of the value.
    """
    assert size in (8, 16, cpu.address_bit_size)
    cpu.STACK = cpu.STACK - size // 8
    base, _, _ = cpu.get_descriptor(cpu.read_register('SS'))
    address = cpu.STACK + base
    cpu.write_int(address, value, size)
def savvyize(self, input_string, recursive=False, stemma=False):
    '''Determines which files should be processed
    NB: this is the PUBLIC method
    @returns filenames_list
    '''
    input_string = os.path.abspath(input_string)
    tasks = []
    restricted = [symbol for symbol in self.settings['skip_if_path']] \
        if self.settings['skip_if_path'] else []

    # given folder
    if os.path.isdir(input_string):
        if recursive:
            for root, dirs, files in os.walk(input_string):
                # beware of broken links on unix!
                # (NB find ~ -type l -exec rm -f {} \;)
                # skip_if_path directive
                to_filter = []
                for dir in dirs:
                    dir = u(dir)
                    for rs in restricted:
                        if dir.startswith(rs) or dir.endswith(rs):
                            to_filter.append(dir)
                            break
                dirs[:] = [x for x in dirs if x not in to_filter]
                for filename in files:
                    # skip_if_path directive
                    filename = u(filename)
                    if restricted:
                        for rs in restricted:
                            if filename.startswith(rs) or filename.endswith(rs):
                                break
                        else:
                            tasks.append(root + os.sep + filename)
                    else:
                        tasks.append(root + os.sep + filename)
        else:
            for filename in os.listdir(input_string):
                filename = u(filename)
                if os.path.isfile(input_string + os.sep + filename):
                    # skip_if_path directive
                    if restricted:
                        for rs in restricted:
                            if filename.startswith(rs) or filename.endswith(rs):
                                break
                        else:
                            tasks.append(input_string + os.sep + filename)
                    else:
                        tasks.append(input_string + os.sep + filename)
    # given full filename
    elif os.path.isfile(input_string):
        tasks.append(input_string)  # skip_if_path directive is not applicable here
    # given filename stemma
    else:
        if stemma:
            parent = os.path.dirname(input_string)
            for filename in os.listdir(parent):
                filename = u(filename)
                if input_string in parent + os.sep + filename \
                        and not os.path.isdir(parent + os.sep + filename):
                    # skip_if_path directive
                    if restricted:
                        for rs in restricted:
                            if filename.startswith(rs) or filename.endswith(rs):
                                break
                        else:
                            tasks.append(parent + os.sep + filename)
                    else:
                        tasks.append(parent + os.sep + filename)
    return tasks
def _get_connection(self):
    """Make SSH connection to the IOS XE device.

    The external ncclient library is used for creating this connection.
    This method keeps state of any existing connections and reuses them if
    already connected. Also interfaces (except management) are typically
    disabled by default when it is booted. So if connecting for the first
    time, driver will enable all other interfaces and keep that status in
    the `_itfcs_enabled` flag.
    """
    try:
        if self._ncc_connection and self._ncc_connection.connected:
            return self._ncc_connection
        else:
            # ncclient needs 'name' to be 'csr' in order to communicate
            # with the device in the correct way.
            self._ncc_connection = manager.connect(
                host=self._host_ip, port=self._host_ssh_port,
                username=self._username, password=self._password,
                device_params={'name': "csr"}, timeout=self._timeout)
            if not self._itfcs_enabled:
                self._itfcs_enabled = self._enable_itfcs(
                    self._ncc_connection)
        return self._ncc_connection
    except Exception as e:
        conn_params = {'host': self._host_ip, 'port': self._host_ssh_port,
                       'user': self._username,
                       'timeout': self._timeout, 'reason': e.message}
        raise cfg_exc.ConnectionException(**conn_params)
def _wrap(value):
    """
    Wraps the passed value in a Sequence if it is not a primitive. If it is
    a string argument it is expanded to a list of characters.

    >>> _wrap(1)
    1

    >>> _wrap("abc")
    ['a', 'b', 'c']

    >>> type(_wrap([1, 2]))
    functional.pipeline.Sequence

    :param value: value to wrap
    :return: wrapped or not wrapped value
    """
    if is_primitive(value):
        return value
    if isinstance(value, (dict, set)) or is_namedtuple(value):
        return value
    elif isinstance(value, collections.Iterable):
        try:
            if type(value).__name__ == 'DataFrame':
                import pandas
                if isinstance(value, pandas.DataFrame):
                    return Sequence(value.values)
        except ImportError:  # pragma: no cover
            pass
        return Sequence(value)
    else:
        return value
def requeue(self, message_id, timeout=0, backoff=True):
    """Re-queue a message (indicate failure to process)."""
    self.send(nsq.requeue(message_id, timeout))
    self.finish_inflight()
    self.on_requeue.send(
        self,
        message_id=message_id,
        timeout=timeout,
        backoff=backoff
    )
def start_greedy_ensemble_search(automated_run, session, path):
    """Starts an automated ensemble search using greedy forward model selection.

    The steps for this search are adapted from "Ensemble Selection from
    Libraries of Models" by Caruana.

    1. Start with the empty ensemble
    2. Add to the ensemble the model in the library that maximizes the
       ensemble's performance on the error metric.
    3. Repeat step 2 for a fixed number of iterations or until all models
       have been used.

    Args:
        automated_run (xcessiv.models.AutomatedRun): Automated run object

        session: Valid SQLAlchemy session

        path (str, unicode): Path to project folder
    """
    module = functions.import_string_code_as_module(automated_run.source)
    assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators

    best_ensemble = []  # List containing IDs of best performing ensemble for the last round
    secondary_learner = automated_run.base_learner_origin.return_estimator()
    secondary_learner.set_params(**module.secondary_learner_hyperparameters)

    for i in range(module.max_num_base_learners):
        best_score = -float('inf')  # Best metric for this round (not in total!)
        current_ensemble = best_ensemble[:]  # Shallow copy of best ensemble
        for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():
            if base_learner in current_ensemble:  # Don't append when learner is already in
                continue
            current_ensemble.append(base_learner)

            # Check if our "best ensemble" already exists
            existing_ensemble = session.query(models.StackedEnsemble).\
                filter_by(base_learner_origin_id=automated_run.base_learner_origin.id,
                          secondary_learner_hyperparameters=secondary_learner.get_params(),
                          base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()

            if existing_ensemble and existing_ensemble.job_status == 'finished':
                score = existing_ensemble.individual_score[module.metric_to_optimize]
            elif existing_ensemble and existing_ensemble.job_status != 'finished':
                eval_stacked_ensemble(existing_ensemble, session, path)
                score = existing_ensemble.individual_score[module.metric_to_optimize]
            else:
                stacked_ensemble = models.StackedEnsemble(
                    secondary_learner_hyperparameters=secondary_learner.get_params(),
                    base_learners=current_ensemble,
                    base_learner_origin=automated_run.base_learner_origin,
                    job_status='started'
                )
                session.add(stacked_ensemble)
                session.commit()
                eval_stacked_ensemble(stacked_ensemble, session, path)
                score = stacked_ensemble.individual_score[module.metric_to_optimize]

            score = -score if module.invert_metric else score

            if best_score < score:
                best_score = score
                best_ensemble = current_ensemble[:]

            current_ensemble.pop()
def stopMessage(self, apiMsgId):
    """
    See parent method for documentation
    """
    content = self.parseRest(
        self.request('rest/message/' + apiMsgId, {}, {}, 'DELETE'))

    return {
        'id': content['apiMessageId'].encode('utf-8'),
        'status': content['messageStatus'].encode('utf-8'),
        'description': self.getStatus(content['messageStatus'])
    }
def _delete_resource(name, name_param, desc, res_type, wait=0,
                     status_param=None, status_gone='deleted',
                     region=None, key=None, keyid=None, profile=None,
                     **args):
    '''
    Delete a generic Elasticache resource.
    '''
    try:
        wait = int(wait)
    except Exception:
        raise SaltInvocationError("Bad value ('{0}') passed for 'wait' param - must be an "
                                  "int or boolean.".format(wait))
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if name_param in args:
        log.info(
            "'name: %s' param being overridden by explicitly provided '%s: %s'",
            name, name_param, args[name_param]
        )
        name = args[name_param]
    else:
        args[name_param] = name
    args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
    try:
        func = 'delete_' + res_type
        f = getattr(conn, func)
        if wait:
            func = 'describe_' + res_type + 's'
            s = globals()[func]
    except (AttributeError, KeyError) as e:
        raise SaltInvocationError("No function '{0}()' found: {1}".format(func, e.message))
    try:
        f(**args)
        if not wait:
            log.info('%s %s deletion requested.', desc.title(), name)
            return True
        log.info('Waiting up to %s seconds for %s %s to be deleted.',
                 wait, desc, name)
        orig_wait = wait
        while wait > 0:
            r = s(name=name, conn=conn)
            if not r or r[0].get(status_param) == status_gone:
                log.info('%s %s deleted.', desc.title(), name)
                return True
            sleep = wait if wait % 60 == wait else 60
            log.info('Sleeping %s seconds for %s %s to be deleted.',
                     sleep, desc, name)
            time.sleep(sleep)
            wait -= sleep
        log.error('%s %s not deleted after %s seconds!',
                  desc.title(), name, orig_wait)
        return False
    except botocore.exceptions.ClientError as e:
        log.error('Failed to delete %s %s: %s', desc, name, e)
        return False
Delete a generic Elasticache resource.
def asset_create_task(self, *args, **kwargs): """Create a new task :returns: None :rtype: None :raises: None """ if not self.cur_asset: return task = self.create_task(element=self.cur_asset) if task: taskdata = djitemdata.TaskItemData(task) treemodel.TreeItem(taskdata, self.asset_task_model.root)
Create a new task :returns: None :rtype: None :raises: None
def _refresh_outlineexplorer(self, index=None, update=True, clear=False): """Refresh outline explorer panel""" oe = self.outlineexplorer if oe is None: return if index is None: index = self.get_stack_index() if self.data: finfo = self.data[index] oe.setEnabled(True) if finfo.editor.oe_proxy is None: finfo.editor.oe_proxy = OutlineExplorerProxyEditor( finfo.editor, finfo.filename) oe.set_current_editor(finfo.editor.oe_proxy, update=update, clear=clear) if index != self.get_stack_index(): # The last file added to the outline explorer is not the # currently focused one in the editor stack. Therefore, # we need to force a refresh of the outline explorer to set # the current editor to the currently focused one in the # editor stack. See PR #8015. self._refresh_outlineexplorer(update=False) return self._sync_outlineexplorer_file_order()
Refresh outline explorer panel
def map_port(protocol, public_port, private_port, lifetime=3600, gateway_ip=None, retry=9, use_exception=True): """A function to map public_port to private_port of protocol. Returns the complete response on success. protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True. """ if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]: raise ValueError("Must be either NATPMP_PROTOCOL_UDP or " "NATPMP_PROTOCOL_TCP") if gateway_ip is None: gateway_ip = get_gateway_addr() response = None port_mapping_request = PortMapRequest(protocol, private_port, public_port, lifetime) port_mapping_response = \ send_request_with_retry(gateway_ip, port_mapping_request, response_data_class=PortMapResponse, retry=retry) if port_mapping_response.result != 0 and use_exception: raise NATPMPResultError(port_mapping_response.result, error_str(port_mapping_response.result), port_mapping_response) return port_mapping_response
A function to map public_port to private_port of protocol. Returns the complete response on success. protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP public_port - the public port of the mapping requested private_port - the private port of the mapping requested lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification. gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr() retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification. use_exception - throw an exception if an error result is received from the gateway. Defaults to True.
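A hedged usage sketch of the call above; the `result` attribute is the only response field read here, and it is the one the function itself checks (0 means success in NAT-PMP):

# Request a TCP mapping of public port 60010 to private port 60010 for
# one hour, letting the function auto-detect the gateway.
response = map_port(NATPMP_PROTOCOL_TCP, 60010, 60010, lifetime=3600)
if response.result == 0:
    print('port mapping established')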
def customize_compiler_for_nvcc(compiler, nvcc_settings): """inject deep into distutils to customize gcc/nvcc dispatch """ # tell the compiler it can process .cu files compiler.src_extensions.append('.cu') # save references to the default compiler_so and _compile methods default_compiler_so = compiler.compiler_so default_compile = compiler._compile # now redefine the _compile method. This gets executed for each # object but distutils doesn't have the ability to change compilers # based on source extension: we add it. def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts): # Use NVCC for .cu files if os.path.splitext(src)[1] == '.cu': compiler.set_executable('compiler_so', nvcc_settings['nvcc_path']) default_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) # reset the default compiler_so, which we might have changed for cuda compiler.compiler_so = default_compiler_so # inject our redefined _compile method into the class compiler._compile = _compile
inject deep into distutils to customize gcc/nvcc dispatch
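A sketch of wiring this helper into a setuptools build by overriding build_ext; the nvcc path and file names below are illustrative assumptions, not part of the original:

from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext

class CudaBuildExt(build_ext):
    def build_extensions(self):
        # self.compiler exists by the time build_extensions runs
        customize_compiler_for_nvcc(self.compiler,
                                    {'nvcc_path': '/usr/local/cuda/bin/nvcc'})
        build_ext.build_extensions(self)

setup(name='cuda_ext',
      ext_modules=[Extension('kernels', sources=['kernels.cu', 'wrapper.c'])],
      cmdclass={'build_ext': CudaBuildExt})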
def jinja_loader(self): """Search templates in custom app templates dir (default Flask behaviour), fallback on abilian templates.""" loaders = self._jinja_loaders del self._jinja_loaders loaders.append(Flask.jinja_loader.func(self)) loaders.reverse() return jinja2.ChoiceLoader(loaders)
Search templates in custom app templates dir (default Flask behaviour), fallback on abilian templates.
def get_ppis(self, ppi_df): """Generate Complex Statements from the HPRD PPI data. Parameters ---------- ppi_df : pandas.DataFrame DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file. """ logger.info('Processing PPIs...') for ix, row in ppi_df.iterrows(): agA = self._make_agent(row['HPRD_ID_A']) agB = self._make_agent(row['HPRD_ID_B']) # If we don't get valid agents for both, skip this PPI if agA is None or agB is None: continue isoform_id = '%s_1' % row['HPRD_ID_A'] ev_list = self._get_evidence( row['HPRD_ID_A'], isoform_id, row['PMIDS'], row['EVIDENCE'], 'interactions') stmt = Complex([agA, agB], evidence=ev_list) self.statements.append(stmt)
Generate Complex Statements from the HPRD PPI data. Parameters ---------- ppi_df : pandas.DataFrame DataFrame loaded from the BINARY_PROTEIN_PROTEIN_INTERACTIONS.txt file.
def document_frequencies(self, hashes): '''Get document frequencies for a list of hashes. This will return all zeros unless the index was written with `hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is included in `hashes`, that value will be returned with the total number of documents indexed. If you are looking for documents with that hash, pass :data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead. :param hashes: hashes to query :paramtype hashes: list of :class:`int` :return: map from hash to document frequency ''' result = {} for (k, v) in self.client.get(HASH_FREQUENCY_TABLE, *[(h,) for h in hashes]): if v is None: v = 0 result[k[0]] = v return result
Get document frequencies for a list of hashes. This will return all zeros unless the index was written with `hash_frequencies` set. If :data:`DOCUMENT_HASH_KEY` is included in `hashes`, that value will be returned with the total number of documents indexed. If you are looking for documents with that hash, pass :data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead. :param hashes: hashes to query :paramtype hashes: list of :class:`int` :return: map from hash to document frequency
def create_account(self, body, **kwargs): # noqa: E501 """Create a new account. # noqa: E501 An endpoint for creating a new account. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts -d '{\"display_name\": \"MyAccount1\", \"admin_name\": \"accountAdmin1\", \"email\": \"example_admin@myaccount.info\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.create_account(body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param AccountCreationReq body: Details of the account to be created. (required) :param str action: Action, either 'create' or 'enroll'. <ul><li>'create' creates the account where its admin user has ACTIVE status if admin_password was defined in the request, or RESET status if no admin_password was defined. If the user already exists, its status is not modified. </li><li>'enroll' creates the account where its admin user has ENROLLING status. If the user already exists, its status is not modified. Email to finish the enrollment or to notify the existing user about the new account is sent to the admin_email defined in the request. </li></ul> :return: AccountCreationResp If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.create_account_with_http_info(body, **kwargs) # noqa: E501 else: (data) = self.create_account_with_http_info(body, **kwargs) # noqa: E501 return data
Create a new account. # noqa: E501 An endpoint for creating a new account. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/accounts -d '{\"display_name\": \"MyAccount1\", \"admin_name\": \"accountAdmin1\", \"email\": \"example_admin@myaccount.info\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.create_account(body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param AccountCreationReq body: Details of the account to be created. (required) :param str action: Action, either 'create' or 'enroll'. <ul><li>'create' creates the account where its admin user has ACTIVE status if admin_password was defined in the request, or RESET status if no admin_password was defined. If the user already exists, its status is not modified. </li><li>'enroll' creates the account where its admin user has ENROLLING status. If the user already exists, its status is not modified. Email to finish the enrollment or to notify the existing user about the new account is sent to the admin_email defined in the request. </li></ul> :return: AccountCreationResp If the method is called asynchronously, returns the request thread.
def TargetDirectory(ID, season, relative=False, **kwargs): ''' Returns the location of the :py:mod:`everest` data on disk for a given target. :param ID: The target ID :param int season: The target season number :param bool relative: Relative path? Default :py:obj:`False` ''' if season is None: return None if relative: path = '' else: path = EVEREST_DAT return os.path.join(path, 'k2', 'c%02d' % season, ('%09d' % ID)[:4] + '00000', ('%09d' % ID)[4:])
Returns the location of the :py:mod:`everest` data on disk for a given target. :param ID: The target ID :param int season: The target season number :param bool relative: Relative path? Default :py:obj:`False`
def section_tortuosity(section): '''Tortuosity of a section The tortuosity is defined as the ratio of the path length of a section and the euclidean distance between its end points. The path length is the sum of distances between consecutive points. If the section contains less than 2 points, the value 1 is returned. ''' pts = section.points return 1 if len(pts) < 2 else mm.section_length(pts) / mm.point_dist(pts[-1], pts[0])
Tortuosity of a section The tortuosity is defined as the ratio of the path length of a section and the euclidean distance between its end points. The path length is the sum of distances between consecutive points. If the section contains less than 2 points, the value 1 is returned.
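The same ratio checked by hand on a right-angle polyline with plain NumPy instead of the module's mm helpers: a path length of 2.0 against a straight-line distance of sqrt(2) gives a tortuosity of about 1.414.

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
path_length = np.sum(np.linalg.norm(np.diff(pts, axis=0), axis=1))  # 2.0
euclidean = np.linalg.norm(pts[-1] - pts[0])                        # sqrt(2)
print(path_length / euclidean)                                      # ~1.4142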
def prepare_url(hostname, path, params=None): """ Prepare Elasticsearch request url. :param hostname: host name :param path: request path :param params: optional url params :return: """ url = hostname + path if params: url = url + '?' + urlencode(params) if not url.startswith(('http:', 'https:')): url = "http://" + url return url.encode('utf-8')
Prepare Elasticsearch request url. :param hostname: host name :param path: request path :param params: optional url params :return:
def _parse_date_rfc822(dateString): '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' data = dateString.split() if data[0][-1] in (',', '.') or data[0].lower() in _daynames: del data[0] if len(data) == 4: s = data[3] s = s.split('+', 1) if len(s) == 2: data[3:] = s else: data.append('') dateString = " ".join(data) if len(data) < 5: dateString += ' 00:00:00 GMT' return email.utils.parsedate_tz(dateString)
Parse an RFC822, RFC1123, RFC2822, or asctime-style date
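The heavy lifting is done by the stdlib; a quick sketch of the 10-tuple the function returns and how to turn it into a POSIX timestamp:

import email.utils

parsed = email.utils.parsedate_tz('Mon, 20 Nov 1995 19:12:08 -0500')
# parsed == (1995, 11, 20, 19, 12, 8, 0, 1, -1, -18000)
timestamp = email.utils.mktime_tz(parsed)  # offset-aware POSIX timestamp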
def ack(self, device_uuid, ack_keys): """ Acknowledge received data Send acknowledgement keys to let know the Sync service which data you have. As you fetch new data, you need to send acknowledgement keys. :calls: ``post /sync/ack`` :param string device_uuid: Device's UUID for which to perform synchronization. :param list ack_keys: List of acknowledgement keys. :return: True if the operation succeeded. :rtype: bool """ attributes = {'ack_keys': ack_keys} status_code, _, _ = self.http_client.post('/sync/ack', body=attributes, headers=self.build_headers(device_uuid)) return status_code == 202
Acknowledge received data Send acknowledgement keys to let know the Sync service which data you have. As you fetch new data, you need to send acknowledgement keys. :calls: ``post /sync/ack`` :param string device_uuid: Device's UUID for which to perform synchronization. :param list ack_keys: List of acknowledgement keys. :return: True if the operation succeeded. :rtype: bool
def do_symbols_matching(self): """ Performs symbols matching. """ self._clear_decorations() current_block = self.editor.textCursor().block() data = get_block_symbol_data(self.editor, current_block) pos = self.editor.textCursor().block().position() for symbol in [PAREN, SQUARE, BRACE]: self._match(symbol, data, pos)
Performs symbols matching.
def add_contacts( self, contacts: List["pyrogram.InputPhoneContact"] ): """Use this method to add contacts to your Telegram address book. Args: contacts (List of :obj:`InputPhoneContact <pyrogram.InputPhoneContact>`): The contact list to be added Returns: On success, the added contacts are returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. """ imported_contacts = self.send( functions.contacts.ImportContacts( contacts=contacts ) ) return imported_contacts
Use this method to add contacts to your Telegram address book. Args: contacts (List of :obj:`InputPhoneContact <pyrogram.InputPhoneContact>`): The contact list to be added Returns: On success, the added contacts are returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
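A hedged usage sketch, assuming the pyrogram 0.x Client API this method is bound to; the phone number and names are placeholders:

from pyrogram import Client, InputPhoneContact

app = Client('my_account')
app.start()
app.add_contacts([InputPhoneContact('+10000000000', 'Alice', 'Example')])
app.stop()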
def make_signature(name, params, common_params, common_param_values): """ Create a signature for a geom or stat Gets the DEFAULT_PARAMS (params) and creates a comma-separated list of the `name=value` pairs. The common_params come first in the list, and they take their values from either the params-dict or the common_geom_param_values-dict. """ tokens = [] seen = set() def tokens_append(key, value): if isinstance(value, str): value = "'{}'".format(value) tokens.append('{}={}'.format(key, value)) # preferred params come first for key in common_params: seen.add(key) try: value = params[key] except KeyError: value = common_param_values[key] tokens_append(key, value) # other params (these are the geom/stat specific parameters) for key in (set(params) - seen): tokens_append(key, params[key]) # name, 1 opening bracket, 4 spaces in SIGNATURE_TPL s1 = name + '(' s2 = ', '.join(tokens) + ', **kwargs)' line_width = 78 - len(s1) indent_spaces = ' ' * (len(s1) + 4) newline_and_space = '\n' + indent_spaces s2_lines = wrap(s2, width=line_width) return s1 + newline_and_space.join(s2_lines)
Create a signature for a geom or stat Gets the DEFAULT_PARAMS (params) and creates a comma-separated list of the `name=value` pairs. The common_params come first in the list, and they take their values from either the params-dict or the common_geom_param_values-dict.
def copy_files(filelist, destdir): """Copy a list of files to destdir, preserving directory structure. File names should be relative to the current working directory. """ for filename in filelist: destfile = os.path.join(destdir, filename) # filename should not be absolute, but let's double-check assert destfile.startswith(destdir + os.path.sep) destfiledir = os.path.dirname(destfile) if not os.path.isdir(destfiledir): os.makedirs(destfiledir) if os.path.isdir(filename): os.mkdir(destfile) else: shutil.copy2(filename, destfile)
Copy a list of files to destdir, preserving directory structure. File names should be relative to the current working directory.
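A usage sketch under the stated precondition that the paths are relative to the current working directory (the names are placeholders):

# Mirrors src/module.py and docs/index.rst into /tmp/staging, creating
# /tmp/staging/src and /tmp/staging/docs as needed.
copy_files(['src/module.py', 'docs/index.rst'], '/tmp/staging')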
def ts_func(f): """ This wraps a function that would normally only accept an array and allows it to operate on a DataFrame. Useful for applying numpy functions to DataFrames. """ def wrap_func(df, *args): # TODO: should vectorize to apply over all columns? return Chromatogram(f(df.values, *args), df.index, df.columns) return wrap_func
This wraps a function that would normally only accept an array and allows it to operate on a DataFrame. Useful for applying numpy functions to DataFrames.
def __voronoi_finite_polygons_2d(vor, radius=None): """ Reconstruct infinite voronoi regions in a 2D diagram to finite regions. Parameters ---------- vor : Voronoi Input diagram radius : float, optional Distance to 'points at infinity'. Returns ------- regions : list of tuples Indices of vertices in each revised Voronoi region. vertices : list of tuples Coordinates for revised Voronoi vertices. Same as coordinates of input vertices, with 'points at infinity' appended to the end. """ if vor.points.shape[1] != 2: raise ValueError("Requires 2D input") new_regions = [] new_vertices = vor.vertices.tolist() center = vor.points.mean(axis=0) if radius is None: radius = vor.points.ptp().max() # Construct a map containing all ridges for a given point all_ridges = {} for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices): all_ridges.setdefault(p1, []).append((p2, v1, v2)) all_ridges.setdefault(p2, []).append((p1, v1, v2)) # Reconstruct infinite regions for p1, region in enumerate(vor.point_region): vertices = vor.regions[region] if all(v >= 0 for v in vertices): # finite region new_regions.append(vertices) continue # reconstruct a non-finite region if p1 not in all_ridges: continue ridges = all_ridges[p1] new_region = [v for v in vertices if v >= 0] for p2, v1, v2 in ridges: if v2 < 0: v1, v2 = v2, v1 if v1 >= 0: # finite ridge: already in the region continue # Compute the missing endpoint of an infinite ridge t = vor.points[p2] - vor.points[p1] # tangent t /= np.linalg.norm(t) n = np.array([-t[1], t[0]]) # normal midpoint = vor.points[[p1, p2]].mean(axis=0) direction = np.sign(np.dot(midpoint - center, n)) * n far_point = vor.vertices[v2] + direction * radius new_region.append(len(new_vertices)) new_vertices.append(far_point.tolist()) # sort region counterclockwise vs = np.asarray([new_vertices[v] for v in new_region]) c = vs.mean(axis=0) angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0]) new_region = np.array(new_region)[np.argsort(angles)] # finish new_regions.append(new_region.tolist()) return new_regions, np.asarray(new_vertices)
Reconstruct infinite voronoi regions in a 2D diagram to finite regions. Parameters ---------- vor : Voronoi Input diagram radius : float, optional Distance to 'points at infinity'. Returns ------- regions : list of tuples Indices of vertices in each revised Voronoi region. vertices : list of tuples Coordinates for revised Voronoi vertices. Same as coordinates of input vertices, with 'points at infinity' appended to the end.
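A usage sketch with SciPy; the helper is written against scipy.spatial.Voronoi, and the double-underscore name is used as-is on the assumption it is reachable in scope (inside a class body it would be name-mangled):

import numpy as np
from scipy.spatial import Voronoi

points = np.random.rand(15, 2)
vor = Voronoi(points)
regions, vertices = __voronoi_finite_polygons_2d(vor)
for region in regions:
    polygon = vertices[region]  # finite, counterclockwise-ordered polygon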
def create_shield_layer(shield, hashcode): """Creates the layer for shields.""" pgn_path = os.path.join(PACKAGE_DIR, 'pgn', shield + '.pgn') return pgnreader.parse_pagan_file(pgn_path, hashcode, sym=False, invert=False)
Creates the layer for shields.
def push_dcp(event, callback, position='right'): """Push a callable for :class:`~flask_pluginkit.PluginManager`, :func:`push_dcp`. Example usage:: push_dcp('demo', lambda:'Hello dcp') .. versionadded:: 2.1.0 """ ctx = stack.top ctx.app.extensions.get('pluginkit').push_dcp(event, callback, position)
Push a callable for :class:`~flask_pluginkit.PluginManager`, :func:`push_dcp`. Example usage:: push_dcp('demo', lambda:'Hello dcp') .. versionadded:: 2.1.0
def make_index(gff_file): """ Make a sqlite database for fast retrieval of features. """ import gffutils db_file = gff_file + ".db" if need_update(gff_file, db_file): if op.exists(db_file): os.remove(db_file) logging.debug("Indexing `{0}`".format(gff_file)) gffutils.create_db(gff_file, db_file, merge_strategy="create_unique") else: logging.debug("Load index `{0}`".format(gff_file)) return gffutils.FeatureDB(db_file)
Make a sqlite database for fast retrieval of features.
def add_section(self, name=None, anchor=None, description='', comment='', helptext='', plot='', content='', autoformat=True, autoformat_type='markdown'): """ Add a section to the module report output """ # Default anchor if anchor is None: if name is not None: nid = name.lower().strip().replace(' ','-') anchor = '{}-{}'.format(self.anchor, nid) else: sl = len(self.sections) + 1 anchor = '{}-section-{}'.format(self.anchor, sl) # Skip if user has a config to remove this module section if anchor in config.remove_sections: logger.debug("Skipping section '{}' because specified in user config".format(anchor)) return # Sanitise anchor ID and check for duplicates anchor = report.save_htmlid(anchor) # See if we have a user comment in the config if anchor in config.section_comments: comment = config.section_comments[anchor] # Format the content if autoformat: if len(description) > 0: description = textwrap.dedent(description) if autoformat_type == 'markdown': description = markdown.markdown(description) if len(comment) > 0: comment = textwrap.dedent(comment) if autoformat_type == 'markdown': comment = markdown.markdown(comment) if len(helptext) > 0: helptext = textwrap.dedent(helptext) if autoformat_type == 'markdown': helptext = markdown.markdown(helptext) # Strip excess whitespace description = description.strip() comment = comment.strip() helptext = helptext.strip() self.sections.append({ 'name': name, 'anchor': anchor, 'description': description, 'comment': comment, 'helptext': helptext, 'plot': plot, 'content': content, 'print_section': any([ n is not None and len(n) > 0 for n in [description, comment, helptext, plot, content] ]) })
Add a section to the module report output
def monthly(usaf, year, field='GHI (W/m^2)'): """monthly insolation""" m = [] lastm = 1 usafdata = Data(usaf, year) t = 0 for r in usafdata: r['GHI (W/m^2)'] = r['Glo Mod (Wh/m^2)'] r['DHI (W/m^2)'] = r['Dif Mod (Wh/m^2)'] r['DNI (W/m^2)'] = r['Dir Mod (Wh/m^2)'] if r['datetime'].month != lastm: m.append(t/1000.) t = 0 lastm = r['datetime'].month # same key as the comparison above t += float(r[field]) m.append(t/1000.) # flush the final month's accumulated total return m
monthly insolation
def parse_napp(napp_id): """Convert a napp_id into a tuple with username, napp name and version. Args: napp_id: String with the form 'username/napp[:version]' (version is optional). If no version is found, it will be None. Returns: tuple: A tuple with (username, napp, version) Raises: KytosException: If a NApp does not have the form _username/name_. """ # `napp_id` regex, composed of two mandatory parts (username, napp_name) # and one optional part (version). # username and napp_name need to start with a letter, are composed of # letters, numbers and underscores and must have at least three characters. # They are separated by a slash. # version is optional and can take any format. It is separated from the # napp name by a colon, if a version is defined. regex = r'([a-zA-Z][a-zA-Z0-9_]{2,})/([a-zA-Z][a-zA-Z0-9_]{2,}):?(.+)?' compiled_regex = re.compile(regex) matched = compiled_regex.fullmatch(napp_id) if not matched: msg = '"{}" NApp does not have the form username/napp_name[:version].' raise KytosException(msg.format(napp_id)) return matched.groups()
Convert a napp_id into a tuple with username, napp name and version. Args: napp_id: String with the form 'username/napp[:version]' (version is optional). If no version is found, it will be None. Returns: tuple: A tuple with (username, napp, version) Raises: KytosException: If a NApp does not have the form _username/name_.
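The regex makes the outcomes deterministic:

parse_napp('kytos/of_core:1.2.0')  # -> ('kytos', 'of_core', '1.2.0')
parse_napp('kytos/of_core')        # -> ('kytos', 'of_core', None)
parse_napp('ab/cd')                # raises KytosException (parts too short)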
def constraints(self): """ :rtype tuple :return: All constraints represented by this and parent sets. """ if self._parent is not None: return tuple(self._constraints) + self._parent.constraints return tuple(self._constraints)
:rtype tuple :return: All constraints represented by this and parent sets.
def intersect_arc(self, arc): ''' Given an arc, finds the intersection point(s) of this arc with that arc. Returns a list of 2x1 numpy arrays. The list has length 0, 1 or 2, depending on how many intersection points there are. Points are ordered along the arc. Intersection with an arc along the same circle (which usually means infinitely many points) is reported as no intersection at all. >>> a = Arc((0, 0), 1, -90, 90, True) >>> a.intersect_arc(Arc((1, 0), 1, 90, 270, True)) [array([ 0.5 , -0.866...]), array([ 0.5 , 0.866...])] >>> a.intersect_arc(Arc((1, 0), 1, 90, 180, True)) [array([ 0.5 , 0.866...])] >>> a.intersect_arc(Arc((1, 0), 1, 121, 239, True)) [] >>> a.intersect_arc(Arc((1, 0), 1, 120-tol, 240+tol, True)) # Without -tol and +tol the results differ on different architectures due to rounding (see Debian #813782). [array([ 0.5 , -0.866...]), array([ 0.5 , 0.866...])] ''' intersections = self.intersect_circle(arc.center, arc.radius) isections = [pt for pt in intersections if arc.contains_angle_degrees(arc.point_as_angle(pt))] return isections
Given an arc, finds the intersection point(s) of this arc with that arc. Returns a list of 2x1 numpy arrays. The list has length 0, 1 or 2, depending on how many intersection points there are. Points are ordered along the arc. Intersection with an arc along the same circle (which usually means infinitely many points) is reported as no intersection at all. >>> a = Arc((0, 0), 1, -90, 90, True) >>> a.intersect_arc(Arc((1, 0), 1, 90, 270, True)) [array([ 0.5 , -0.866...]), array([ 0.5 , 0.866...])] >>> a.intersect_arc(Arc((1, 0), 1, 90, 180, True)) [array([ 0.5 , 0.866...])] >>> a.intersect_arc(Arc((1, 0), 1, 121, 239, True)) [] >>> a.intersect_arc(Arc((1, 0), 1, 120-tol, 240+tol, True)) # Without -tol and +tol the results differ on different architectures due to rounding (see Debian #813782). [array([ 0.5 , -0.866...]), array([ 0.5 , 0.866...])]
def convert_multiPointSource(self, node): """ Convert the given node into a MultiPointSource object. :param node: a node with tag multiPointGeometry :returns: a :class:`openquake.hazardlib.source.MultiPointSource` """ geom = node.multiPointGeometry lons, lats = zip(*split_coords_2d(~geom.posList)) msr = valid.SCALEREL[~node.magScaleRel]() return source.MultiPointSource( source_id=node['id'], name=node['name'], tectonic_region_type=node.attrib.get('tectonicRegion'), mfd=self.convert_mfdist(node), magnitude_scaling_relationship=msr, rupture_aspect_ratio=~node.ruptAspectRatio, upper_seismogenic_depth=~geom.upperSeismoDepth, lower_seismogenic_depth=~geom.lowerSeismoDepth, nodal_plane_distribution=self.convert_npdist(node), hypocenter_distribution=self.convert_hpdist(node), mesh=geo.Mesh(F32(lons), F32(lats)), temporal_occurrence_model=self.get_tom(node))
Convert the given node into a MultiPointSource object. :param node: a node with tag multiPointGeometry :returns: a :class:`openquake.hazardlib.source.MultiPointSource`
def git_log_iterator(path): """ yield commits using git log -- <dir> """ N = 10 count = 0 while True: lines = _run_git_command_lines(['log', '--oneline', '-n', str(N), '--skip', str(count), '--', '.'], cwd=path) for line in lines: sha = line.split(' ', 1)[0] count += 1 yield sha if len(lines) < N: break
yield commits using git log -- <dir>
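A usage sketch (depends on the module-level _run_git_command_lines helper and a git checkout; the subdirectory name is a placeholder):

# Collect the 25 most recent commit shas that touched a subdirectory.
recent = []
for sha in git_log_iterator('src/'):
    recent.append(sha)
    if len(recent) == 25:
        break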
def clear_lock(self, key): """ Remove the lock file. """ lock_path = self._get_lock_path(key) os.remove(lock_path)
Remove the lock file.
def is_all_field_none(self): """ :rtype: bool """ if self._share_detail is not None: return False if self._start_date is not None: return False if self._end_date is not None: return False return True
:rtype: bool
def _temporary_keychain(): """ This function creates a temporary Mac keychain that we can use to work with credentials. This keychain uses a one-time password and a temporary file to store the data. We expect to have one keychain per socket. The returned SecKeychainRef must be freed by the caller, including calling SecKeychainDelete. Returns a tuple of the SecKeychainRef and the path to the temporary directory that contains it. """ # Unfortunately, SecKeychainCreate requires a path to a keychain. This # means we cannot use mkstemp to use a generic temporary file. Instead, # we're going to create a temporary directory and a filename to use there. # This filename will be 8 random bytes expanded into hex (base16). We also # need some random bytes to password-protect the keychain we're creating, # so we ask for 40 random bytes. random_bytes = os.urandom(40) filename = base64.b16encode(random_bytes[:8]).decode('utf-8') password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8 tempdirectory = tempfile.mkdtemp() keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') # We now want to create the keychain itself. keychain = Security.SecKeychainRef() status = Security.SecKeychainCreate( keychain_path, len(password), password, False, None, ctypes.byref(keychain) ) _assert_no_error(status) # Having created the keychain, we want to pass it off to the caller. return keychain, tempdirectory
This function creates a temporary Mac keychain that we can use to work with credentials. This keychain uses a one-time password and a temporary file to store the data. We expect to have one keychain per socket. The returned SecKeychainRef must be freed by the caller, including calling SecKeychainDelete. Returns a tuple of the SecKeychainRef and the path to the temporary directory that contains it.
def setdefault (self, key, *args): """Set lowercase key value and return.""" assert isinstance(key, basestring) return dict.setdefault(self, key.lower(), *args)
Set lowercase key value and return.
def get_step_f(step_f, lR2, lS2): """Update the stepsize given the primal and dual errors. See Boyd (2011), section 3.4.1 """ mu, tau = 10, 2 if lR2 > mu*lS2: return step_f * tau elif lS2 > mu*lR2: return step_f / tau return step_f
Update the stepsize given the primal and dual errors. See Boyd (2011), section 3.4.1
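Worked values for the residual-balancing rule (mu=10, tau=2):

get_step_f(1.0, 25.0, 2.0)  # primal residual dominates -> 2.0
get_step_f(1.0, 2.0, 25.0)  # dual residual dominates   -> 0.5
get_step_f(1.0, 5.0, 4.0)   # residuals balanced        -> 1.0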
def wgan(cls, data:DataBunch, generator:nn.Module, critic:nn.Module, switcher:Callback=None, clip:float=0.01, **learn_kwargs): "Create a WGAN from `data`, `generator` and `critic`." return cls(data, generator, critic, NoopLoss(), WassersteinLoss(), switcher=switcher, clip=clip, **learn_kwargs)
Create a WGAN from `data`, `generator` and `critic`.
def from_coords(cls, coords, sort=True): """ Create a mesh object from a list of 3D coordinates (by sorting them) :param coords: list of coordinates :param sort: flag (default True) :returns: a :class:`Mesh` instance """ coords = list(coords) if sort: coords.sort() if len(coords[0]) == 2: # 2D coordinates lons, lats = zip(*coords) depths = None else: # 3D coordinates lons, lats, depths = zip(*coords) depths = numpy.array(depths) return cls(numpy.array(lons), numpy.array(lats), depths)
Create a mesh object from a list of 3D coordinates (by sorting them) :param coords: list of coordinates :param sort: flag (default True) :returns: a :class:`Mesh` instance
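A usage sketch, assuming the openquake.hazardlib Mesh class this alternative constructor is defined on:

# 2D input: depths stay None; 3D input: depths become a numpy array.
mesh2d = Mesh.from_coords([(10.5, 45.5), (10.0, 45.0)])
mesh3d = Mesh.from_coords([(10.0, 45.0, 5.0), (10.5, 45.5, 8.0)], sort=False)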
def change(self, inpt, hashfun=DEFAULT_HASHFUN): """Change the avatar by providing a new input. Uses the standard hash function if no one is given.""" self.img = self.__create_image(inpt, hashfun)
Change the avatar by providing a new input. Uses the standard hash function if no one is given.
def join_path(a, *p): """Join path tokens together similar to osp.join, but always use '/' instead of possibly '\' on windows.""" path = a for b in p: if len(b) == 0: continue if b.startswith('/'): path += b[1:] elif path == '' or path.endswith('/'): path += b else: path += '/' + b # END for each path token to add return path
Join path tokens together similar to osp.join, but always use '/' instead of possibly '\' on windows.
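Deterministic examples; note the loop strips a leading '/' from a later token and concatenates it directly, rather than inserting a separator:

join_path('refs', 'heads', 'master')  # -> 'refs/heads/master'
join_path('refs/', 'heads')           # -> 'refs/heads'
join_path('refs', '/heads')           # -> 'refsheads' (leading '/' stripped)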
def sudo_yield_file_lines(file_path='/etc/NetworkManager/system-connections/*'): r"""Cat a file iterating/yielding one line at a time, shell will execute: `sudo cat $file_path` so if your shell doesn't have sudo or cat, no joy Input: file_path(str): glob stars are fine >> for line in sudo_yield_file_lines('/etc/NetworkManager/system-connections/*') """ # substitute your Windoze/DOS/PowerlessShell command here: sudo_cat_cmd = 'sudo cat {}'.format(file_path) process = subprocess.Popen(sudo_cat_cmd, stdout=subprocess.PIPE, shell=True) # read one line at a time, as it becomes available; stdout yields bytes, # so the sentinel must be b'' or the loop would never terminate on Python 3 for line in iter(process.stdout.readline, b''): yield line
r"""Cat a file iterating/yielding one line at a time, shell will execute: `sudo cat $file_path` so if your shell doesn't have sudo or cat, no joy Input: file_path(str): glob stars are fine >> for line in sudo_yield_file_lines('/etc/NetworkManager/system-connections/*')
def get_symbols_list(self): '''Return a list of GdxSymb found in the GdxFile.''' rc, nSymb, nElem = gdxcc.gdxSystemInfo(self.gdx_handle) assert rc, 'Unable to retrieve "%s" info' % self.filename self.number_symbols = nSymb self.number_elements = nElem slist = [None] * (nSymb + 1) for j in range(0, nSymb + 1): sinfo = self.get_sid_info(j) if j == 0: sinfo['name'] = 'universal_set' slist[j] = GdxSymb(self, sinfo) return slist
Return a list of GdxSymb found in the GdxFile.