def search_upstream(self, device: devicetools.Device,
                    name: str = 'upstream') -> 'Selection':
    """Return the network upstream of the given starting point, including
    the starting point itself.

    >>> from hydpy.core.examples import prepare_full_example_2
    >>> hp, pub, _ = prepare_full_example_2()

    You can pass both |Node| and |Element| objects and, optionally,
    the name of the newly created |Selection| object:

    >>> test = pub.selections.complete.copy('test')
    >>> test.search_upstream(hp.nodes.lahn_2)
    Selection("upstream",
              nodes=("dill", "lahn_1", "lahn_2"),
              elements=("land_dill", "land_lahn_1", "land_lahn_2",
                        "stream_dill_lahn_2", "stream_lahn_1_lahn_2"))
    >>> test.search_upstream(
    ...     hp.elements.stream_lahn_1_lahn_2, 'UPSTREAM')
    Selection("UPSTREAM",
              nodes="lahn_1",
              elements=("land_lahn_1", "stream_lahn_1_lahn_2"))

    Wrong device specifications result in errors like the following:

    >>> test.search_upstream(1)
    Traceback (most recent call last):
    ...
    TypeError: While trying to determine the upstream network of \
selection `test`, the following error occurred: Either a `Node` or \
an `Element` object is required as the "outlet device", but the given \
`device` value is of type `int`.

    >>> pub.selections.headwaters.search_upstream(hp.nodes.lahn_3)
    Traceback (most recent call last):
    ...
    KeyError: "While trying to determine the upstream network of \
selection `headwaters`, the following error occurred: 'No node named \
`lahn_3` available.'"

    Method |Selection.select_upstream| restricts the current selection
    to the one determined with the method |Selection.search_upstream|:

    >>> test.select_upstream(hp.nodes.lahn_2)
    Selection("test",
              nodes=("dill", "lahn_1", "lahn_2"),
              elements=("land_dill", "land_lahn_1", "land_lahn_2",
                        "stream_dill_lahn_2", "stream_lahn_1_lahn_2"))

    On the contrary, the method |Selection.deselect_upstream| restricts
    the current selection to all devices not determined by method
    |Selection.search_upstream|:

    >>> complete = pub.selections.complete.deselect_upstream(
    ...     hp.nodes.lahn_2)
    >>> complete
    Selection("complete",
              nodes="lahn_3",
              elements=("land_lahn_3", "stream_lahn_2_lahn_3"))

    If necessary, include the "outlet device" manually afterwards:

    >>> complete.nodes += hp.nodes.lahn_2
    >>> complete
    Selection("complete",
              nodes=("lahn_2", "lahn_3"),
              elements=("land_lahn_3", "stream_lahn_2_lahn_3"))
    """
    try:
        selection = Selection(name)
        if isinstance(device, devicetools.Node):
            node = self.nodes[device.name]
            return self.__get_nextnode(node, selection)
        if isinstance(device, devicetools.Element):
            element = self.elements[device.name]
            return self.__get_nextelement(element, selection)
        raise TypeError(
            f'Either a `Node` or an `Element` object is required '
            f'as the "outlet device", but the given `device` value '
            f'is of type `{objecttools.classname(device)}`.')
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to determine the upstream network of '
            f'selection `{self.name}`')
def generate(self, text):
    """Try to get the generated file.

    Args:
        text: The text that you want to generate.
    """
    if not text:
        raise Exception("No text to speak")
    if len(text) >= self.MAX_CHARS:
        raise Exception(
            "Number of characters must be less than %d" % self.MAX_CHARS)
    params = self.__params.copy()
    params["text"] = text
    self._data = requests.get(
        self.TTS_URL, params=params, stream=False).iter_content()
def currentItemChanged(self, _currentIndex=None, _previousIndex=None):
    """Updates the description text widget when the user clicks on a
    selector in the table. The _currentIndex and _previousIndex
    parameters are ignored.
    """
    self.editor.clear()
    self.editor.setTextColor(QCOLOR_REGULAR)

    regItem = self.getCurrentRegItem()
    if regItem is None:
        return

    if self._importOnSelect and regItem.successfullyImported is None:
        self.importRegItem(regItem)

    if regItem.successfullyImported is None:
        self.editor.setTextColor(QCOLOR_NOT_IMPORTED)
        self.editor.setPlainText('<plugin not yet imported>')
    elif regItem.successfullyImported is False:
        self.editor.setTextColor(QCOLOR_ERROR)
        self.editor.setPlainText(str(regItem.exception))
    elif regItem.descriptionHtml:
        self.editor.setHtml(regItem.descriptionHtml)
    else:
        self.editor.setPlainText(regItem.docString)
def format_filename(self, data, row):
    """Returns a formatted filename using the template stored
    in self.filename

    - `data`: vaping message
    - `row`: vaping message data row
    """
    return self.filename.format(**self.filename_formatters(data, row))
def trace_memory_usage(self, frame, event, arg):
    """Callback for sys.settrace"""
    if (event in ('call', 'line', 'return')
            and frame.f_code in self.code_map):
        if event != 'call':
            # "call" event just saves the lineno but not the memory
            mem = _get_memory(-1, include_children=self.include_children)
            # if there is already a measurement for that line get the max
            old_mem = self.code_map[frame.f_code].get(self.prevline, 0)
            self.code_map[frame.f_code][self.prevline] = max(mem, old_mem)
        self.prevline = frame.f_lineno

    if self._original_trace_function is not None:
        self._original_trace_function(frame, event, arg)

    return self.trace_memory_usage
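A brief sketch of how a tracer with this (frame, event, arg) signature is typically installed; the LineProfiler name is an assumption about the surrounding class, and memory_profiler normally wires this up internally:

import sys

profiler = LineProfiler()  # assumed: the class defining trace_memory_usage
sys.settrace(profiler.trace_memory_usage)  # expects a (frame, event, arg) callable
try:
    work()  # hypothetical function whose per-line memory gets sampled
finally:
    sys.settrace(None)  # always restore the default tracer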
def _assemble_lsid(self, module_number):
    """Return an assembled LSID based off the provided module number
    and the authority's base LSID.

    Note: Never includes the module's version number.

    :param module_number:
    :return: string
    """
    if self.base_lsid is None:
        raise Exception("Base LSID in LSID authority not initialized")
    return self.base_lsid + ":" + str(module_number)
def set_menu(self, menu):
    '''set a MPTopMenu on the frame'''
    self.menu = menu
    self.in_queue.put(MPImageMenu(menu))
def decrypt(key, ciphertext):
    """Decrypt Vigenere encrypted ``ciphertext`` using ``key``.

    Example:
        >>> decrypt("KEY", "RIJVS")
        HELLO

    Args:
        key (iterable): The key to use
        ciphertext (str): The text to decrypt

    Returns:
        Decrypted ciphertext
    """
    index = 0
    decrypted = ""
    for char in ciphertext:
        if char in string.punctuation + string.whitespace + string.digits:
            decrypted += char
            continue  # Not part of the decryption

        # Rotate character by the alphabet position of the letter in the key
        alphabet = (string.ascii_uppercase if key[index].isupper()
                    else string.ascii_lowercase)
        decrypted += ''.join(
            shift.decrypt(int(alphabet.index(key[index])), char))
        index = (index + 1) % len(key)

    return decrypted
def print_table(seqs, id2name, name):
    """print table of results

    # seqs[id] = [gene, model,
    #     [[i-gene_pos, i-model_pos, i-length, iseq,
    #       [orfs], [introns], orfs?, introns?], ...]]
    """
    itable = open('%s.itable' % (name.rsplit('.', 1)[0]), 'w')
    print('\t'.join(['#sequence', 'gene', 'model', 'insertion',
                     'gene position', 'model position', 'length',
                     'orf?', 'intron?', 'orf?intron?',
                     'insertion', 'orf', 'intron']), file=itable)
    for seq, info in list(seqs.items()):
        gene, model, insertions = info
        name = id2name[seq]
        for i, ins in enumerate(insertions, 1):
            gene_pos, model_pos, length, iseq, \
                orfs, introns, orfs_b, introns_b, orf_annotations = ins
            # append annotation to orf header
            for orf in orfs:
                parts = orf[0].split()
                annotation = orf_annotations[parts[0].split('>')[1]]
                orf[0] = '%s %s %s' % (parts[0], annotation,
                                       ' '.join(parts[1:]))
            # get orf position
            gene_pos = '-'.join([str(j) for j in gene_pos])
            # check if orf, intron is present
            orfs_introns_b = orfs_b is True or introns_b is True
            out = [name, gene, model, i, gene_pos, model_pos, length,
                   orfs_b, introns_b, orfs_introns_b]
            out.append('|'.join(iseq))
            out.append('|'.join(['|'.join(orf) for orf in orfs]))
            out.append('|'.join(['|'.join(intron) for intron in introns]))
            print('\t'.join([str(i) for i in out]), file=itable)
    itable.close()
def datastore(self, domain, data_type, mapping=None):
    """Get instance of the DataStore module.

    Args:
        domain (str): The domain can be either "system", "organization",
            or "local". When using "organization" the data store can be
            accessed by any Application in the entire org, while "local"
            access is restricted to the App writing the data. The
            "system" option should not be used in almost all cases.
        data_type (str): The data type descriptor (e.g., tc:whois:cache).

    Returns:
        object: An instance of the DataStore Class.
    """
    from .tcex_datastore import TcExDataStore
    return TcExDataStore(self, domain, data_type, mapping)
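A minimal usage sketch, assuming an already initialized tcex instance; the descriptor value follows the example in the docstring:

# 'local' keeps the data private to this App; 'organization' would share
# it org-wide. The optional mapping argument is omitted here.
ds = tcex.datastore('local', 'tc:whois:cache')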
def create_dataset(args):
    """Attempt to create a new dataset given the following params:

    * template_id
    * template_file
    * capacity
    * create_vault
    * [argument] dataset name or full path

    NOTE: genome_build has been deprecated and is no longer used.
    """
    # For backwards compatibility, the "full_path" argument
    # can be a dataset filename, but only if vault and path
    # are set. If vault/path are both provided and there
    # are no forward-slashes in the "full_path", assume
    # the user has provided a dataset filename.
    if '/' not in args.full_path and args.vault and args.path:
        full_path, path_dict = Object.validate_full_path(
            '{0}:/{1}/{2}'.format(args.vault, args.path, args.full_path))
    else:
        full_path, path_dict = Object.validate_full_path(
            args.full_path, vault=args.vault, path=args.path)

    # Accept a template_id or a template_file
    if args.template_id:
        # Validate the template ID
        try:
            tpl = solvebio.DatasetTemplate.retrieve(args.template_id)
        except solvebio.SolveError as e:
            if e.status_code != 404:
                raise e
            print("No template with ID {0} found!".format(args.template_id))
            sys.exit(1)
    elif args.template_file:
        mode = 'r'
        fopen = open
        if check_gzip_path(args.template_file):
            mode = 'rb'
            fopen = gzip.open

        # Validate the template file
        with fopen(args.template_file, mode) as fp:
            try:
                tpl_json = json.load(fp)
            except ValueError:
                print('Template file {0} could not be loaded. Please '
                      'pass valid JSON'.format(args.template_file))
                sys.exit(1)

        tpl = solvebio.DatasetTemplate.create(**tpl_json)
        print("A new dataset template was created with id: {0}"
              .format(tpl.id))
    else:
        print("Creating a new dataset {0} without a template."
              .format(full_path))
        tpl = None
        fields = []
        entity_type = None
        description = None

    if tpl:
        print("Creating new dataset {0} using the template '{1}'."
              .format(full_path, tpl.name))
        fields = tpl.fields
        entity_type = tpl.entity_type
        # include template used to create
        description = 'Created with dataset template: {0}'.format(str(tpl.id))

    return solvebio.Dataset.get_or_create_by_full_path(
        full_path,
        capacity=args.capacity,
        entity_type=entity_type,
        fields=fields,
        description=description,
        create_vault=args.create_vault,
    )
def delete_actions(self, form_id, action_ids):
    """Remove actions from a form

    :param form_id: int
    :param action_ids: list|tuple
    :return: dict|str
    """
    response = self._client.session.delete(
        '{url}/{form_id}/actions/delete'.format(
            url=self.endpoint_url, form_id=form_id
        ),
        params={'actions': action_ids}
    )
    return self.process_response(response)
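A hedged usage sketch; the client variable name is an assumption:

# Remove actions 10 and 11 from form 123; returns the processed
# response (dict or str, per the docstring).
result = forms_client.delete_actions(123, [10, 11])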
def getitem_column_array(self, key):
    """Get column data for target labels.

    Args:
        key: Target labels by which to retrieve data.

    Returns:
        A new QueryCompiler.
    """
    # Convert to list for type checking
    numeric_indices = list(self.columns.get_indexer_for(key))

    # Internal indices is left blank and the internal
    # `apply_func_to_select_indices` will do the conversion and pass it in.
    def getitem(df, internal_indices=[]):
        return df.iloc[:, internal_indices]

    result = self.data.apply_func_to_select_indices(
        0, getitem, numeric_indices, keep_remaining=False
    )

    # We can't just set the columns to key here because there may be
    # multiple instances of a key.
    new_columns = self.columns[numeric_indices]
    new_dtypes = self.dtypes[numeric_indices]
    return self.__constructor__(result, self.index, new_columns, new_dtypes)
def _find_countour_yaml(start, checked, names=None):
    """Traverse the directory tree identified by start until a directory
    already in checked is encountered or the path of contour.yaml is
    found.

    Checked is present both to make the loop termination easy to reason
    about and so the same directories do not get rechecked.

    Args:
        start: the path to start looking in and work upward from
        checked: the set of already checked directories

    Returns:
        the path of the contour.yaml file or None if it is not found
    """
    extensions = []
    if names:
        for name in names:
            if not os.path.splitext(name)[1]:
                extensions.append(name + ".yaml")
                extensions.append(name + ".yml")

    yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions
    directory = start

    while directory not in checked:
        checked.add(directory)
        for fs_yaml_name in yaml_names:
            yaml_path = os.path.join(directory, fs_yaml_name)
            if os.path.exists(yaml_path):
                return yaml_path
        directory = os.path.dirname(directory)
    return None
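A small usage sketch under the assumption that the helper is called with a fresh `checked` set; note that extension expansion only applies to names without a suffix:

import os

# Walks from the current directory toward the filesystem root; the
# `checked` set both terminates the loop and prevents re-scanning.
found = _find_countour_yaml(os.getcwd(), set(), names=['contour'])
if found is None:
    print('no contour.yaml/contour.yml found up the tree')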
def _filter_attrs(self, feature, request):
    """Remove some attributes from the feature and set the geometry to
    None in the feature, based on the ``attrs`` and ``no_geom``
    parameters.
    """
    if 'attrs' in request.params:
        attrs = request.params['attrs'].split(',')
        props = feature.properties
        new_props = {}
        for name in attrs:
            if name in props:
                new_props[name] = props[name]
        feature.properties = new_props
    if asbool(request.params.get('no_geom', False)):
        feature.geometry = None
    return feature
def flipcoords(xcoord, ycoord, axis):
    """Flip the coordinates over a specific axis, to a different quadrant

    :type xcoord: integer
    :param xcoord: The x coordinate to flip
    :type ycoord: integer
    :param ycoord: The y coordinate to flip
    :type axis: string
    :param axis: The axis to flip across. Could be 'x' or 'y'
    """
    axis = axis.lower()
    if axis == 'y':
        # Flipping across the y axis negates the x coordinate.
        return str(-xcoord) + ', ' + str(ycoord)
    elif axis == 'x':
        # Flipping across the x axis negates the y coordinate;
        # the x coordinate always comes first in the output.
        return str(xcoord) + ', ' + str(-ycoord)
    raise ValueError("Invalid axis. Neither x nor y was specified.")
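A few illustrative calls; the expected strings follow directly from the negation logic above:

# Flipping across the y axis negates x; across the x axis negates y.
assert flipcoords(3, 4, 'y') == '-3, 4'
assert flipcoords(-3, 4, 'Y') == '3, 4'   # axis matching is case-insensitive
assert flipcoords(3, 4, 'x') == '3, -4'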
def lxml(self) -> _LXML:
    """`lxml <http://lxml.de>`_ representation of the
    :class:`Element <Element>` or :class:`XML <XML>`.
    """
    if self._lxml is None:
        self._lxml = etree.fromstring(self.raw_xml)
    return self._lxml
def read_fwf(filepath_or_buffer: FilePathOrBuffer,
             colspecs='infer',
             widths=None,
             infer_nrows=100,
             **kwds):
    r"""
    Read a table of fixed-width formatted lines into DataFrame.

    Also supports optionally iterating or breaking of the file
    into chunks.

    Additional help can be found in the `online docs for IO Tools
    <http://pandas.pydata.org/pandas-docs/stable/io.html>`_.

    Parameters
    ----------
    filepath_or_buffer : str, path object, or file-like object
        Any valid string path is acceptable. The string could be a URL.
        Valid URL schemes include http, ftp, s3, and file. For file URLs,
        a host is expected. A local file could be:
        file://localhost/path/to/table.csv.

        If you want to pass in a path object, pandas accepts either
        ``pathlib.Path`` or ``py._path.local.LocalPath``.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handler (e.g. via builtin ``open`` function)
        or ``StringIO``.
    colspecs : list of tuple (int, int) or 'infer', optional
        A list of tuples giving the extents of the fixed-width
        fields of each line as half-open intervals (i.e., [from, to[ ).
        String value 'infer' can be used to instruct the parser to try
        detecting the column specifications from the first 100 rows of
        the data which are not being skipped via skiprows (default='infer').
    widths : list of int, optional
        A list of field widths which can be used instead of 'colspecs' if
        the intervals are contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when letting the parser determine
        the `colspecs`.

        .. versionadded:: 0.24.0

    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.

    Returns
    -------
    DataFrame or TextParser
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.

    See Also
    --------
    to_csv : Write DataFrame to a comma-separated values (csv) file.
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Examples
    --------
    >>> pd.read_fwf('data.csv')  # doctest: +SKIP
    """
    # Check input arguments.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    elif colspecs not in (None, 'infer') and widths is not None:
        raise ValueError("You must specify only one of 'widths' and "
                         "'colspecs'")

    # Compute 'colspecs' from 'widths', if specified.
    if widths is not None:
        colspecs, col = [], 0
        for w in widths:
            colspecs.append((col, col + w))
            col += w

    kwds['colspecs'] = colspecs
    kwds['infer_nrows'] = infer_nrows
    kwds['engine'] = 'python-fwf'
    return _read(filepath_or_buffer, kwds)
r""" Read a table of fixed-width formatted lines into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools <http://pandas.pydata.org/pandas-docs/stable/io.html>`_. Parameters ---------- filepath_or_buffer : str, path object, or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: file://localhost/path/to/table.csv. If you want to pass in a path object, pandas accepts either ``pathlib.Path`` or ``py._path.local.LocalPath``. By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. colspecs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data which are not being skipped via skiprows (default='infer'). widths : list of int, optional A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. infer_nrows : int, default 100 The number of rows to consider when letting the parser determine the `colspecs`. .. versionadded:: 0.24.0 **kwds : optional Optional keyword arguments can be passed to ``TextFileReader``. Returns ------- DataFrame or TextParser A comma-separated values (csv) file is returned as two-dimensional data structure with labeled axes. See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. read_csv : Read a comma-separated values (csv) file into DataFrame. Examples -------- >>> pd.read_fwf('data.csv') # doctest: +SKIP
def write_logfile():  # type: () -> None
    """Write a DEBUG log file COMMAND-YYYYMMDD-HHMMSS.ffffff.log."""
    command = os.path.basename(os.path.realpath(os.path.abspath(sys.argv[0])))
    now = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')
    filename = '{}-{}.log'.format(command, now)
    with open(filename, 'w') as logfile:
        if six.PY3:
            logfile.write(_LOGFILE_STREAM.getvalue())
        else:
            logfile.write(_LOGFILE_STREAM.getvalue().decode(  # type: ignore
                errors='replace'))
def _is_match(self, response, answer):
    """Does the response match the answer?"""
    def compare_conditions(droppable_id, spatial_units, response_conditions):
        """Compare response coordinates with spatial units for droppable_id"""
        coordinate_match = True
        for coordinate in response_conditions['coordinate_conditions']['include'][droppable_id]:
            answer_match = False
            for spatial_unit in spatial_units:
                if (coordinate['containerId'] == spatial_unit['containerId'] and
                        coordinate['coordinate'] in spatial_unit['spatialUnit']):
                    answer_match = True
                    break
            coordinate_match = coordinate_match and answer_match
        return coordinate_match

    # Did the consumer application already do the work for us?
    if response.has_zone_conditions():
        return bool(response.get_zone_conditions() == answer.get_zone_conditions())

    answer_conditions = self._get_conditions_map(answer)
    response_conditions = self._get_conditions_map(response)

    # Check to see if the lists of droppables used are the same:
    if set(answer_conditions['spatial_unit_conditions']['include']) != \
            set(response_conditions['coordinate_conditions']['include']):
        return False

    # Compare included answer spatial unit areas to response coordinates
    for droppable_id, spatial_units in answer_conditions['spatial_unit_conditions']['include'].items():
        # Do the number of defined include conditions match:
        if len(spatial_units) != len(response_conditions['coordinate_conditions']['include'][droppable_id]):
            return False
        if not compare_conditions(droppable_id, spatial_units, response_conditions):
            return False

    # Compare excluded answer spatial unit areas to response coordinates
    for droppable_id, spatial_units in answer_conditions['spatial_unit_conditions']['exclude'].items():
        if compare_conditions(droppable_id, spatial_units, response_conditions):
            return False

    return True
def cache_cluster_present(name, wait=900, security_groups=None, region=None,
                          key=None, keyid=None, profile=None, **args):
    '''
    Ensure a given cache cluster exists.

    name
        Name of the cache cluster (cache cluster id).

    wait
        Integer describing how long, in seconds, to wait for confirmation
        from AWS that the resource is in the desired state. Zero meaning to
        return success or failure immediately of course. Note that waiting
        for the cluster to become available is generally the better course,
        as failure to do so will often lead to subsequent failures when
        managing dependent resources.

    security_groups
        One or more VPC security groups (names and/or IDs) associated with
        the cache cluster.

        .. note::
            This is additive with any sec groups provided via the
            SecurityGroupIds parameter below. Use this parameter ONLY when
            you are creating a cluster in a VPC.

    CacheClusterId
        The node group (shard) identifier. This parameter is stored as a
        lowercase string.

        Constraints:

        - A name must contain from 1 to 20 alphanumeric characters or
          hyphens.
        - The first character must be a letter.
        - A name cannot end with a hyphen or contain two consecutive
          hyphens.

        .. note::
            In general this parameter is not needed, as 'name' is used if
            it's not provided.

    ReplicationGroupId
        The ID of the replication group to which this cache cluster should
        belong. If this parameter is specified, the cache cluster is added
        to the specified replication group as a read replica; otherwise,
        the cache cluster is a standalone primary that is not part of any
        replication group. If the specified replication group is Multi-AZ
        enabled and the Availability Zone is not specified, the cache
        cluster is created in Availability Zones that provide the best
        spread of read replicas across Availability Zones.

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.
            Due to current limitations on Redis (cluster mode disabled),
            this parameter is not supported on Redis (cluster mode enabled)
            replication groups.

    AZMode
        Specifies whether the nodes in this Memcached cluster are created
        in a single Availability Zone or created across multiple
        Availability Zones in the cluster's region. If the AZMode and
        PreferredAvailabilityZones are not specified, ElastiCache assumes
        single-az mode.

        .. note::
            This parameter is ONLY supported for Memcached cache clusters.

    PreferredAvailabilityZone
        The EC2 Availability Zone in which the cache cluster is created.
        All nodes belonging to this Memcached cache cluster are placed in
        the preferred Availability Zone. If you want to create your nodes
        across multiple Availability Zones, use
        PreferredAvailabilityZones.

        Default: System chosen Availability Zone.

    PreferredAvailabilityZones
        A list of the Availability Zones in which cache nodes are created.
        The order of the zones in the list is not important. The number of
        Availability Zones listed must equal the value of NumCacheNodes.
        If you want all the nodes in the same Availability Zone, use
        PreferredAvailabilityZone instead, or repeat the Availability Zone
        multiple times in the list.

        Default: System chosen Availability Zones.

        .. note::
            This option is ONLY supported on Memcached. If you are
            creating your cache cluster in an Amazon VPC (recommended) you
            can only locate nodes in Availability Zones that are associated
            with the subnets in the selected subnet group.

    NumCacheNodes
        The initial (integer) number of cache nodes that the cache cluster
        has.

        .. note::
            For clusters running Redis, this value must be 1. For clusters
            running Memcached, this value must be between 1 and 20.

    CacheNodeType
        The compute and memory capacity of the nodes in the node group
        (shard). Valid node types (and pricing for them) are exhaustively
        described at https://aws.amazon.com/elasticache/pricing/

        .. note::
            All T2 instances must be created in a VPC.
            Redis backup/restore is not supported for Redis (cluster mode
            disabled) T1 and T2 instances. Backup/restore is supported on
            Redis (cluster mode enabled) T2 instances.
            Redis Append-only files (AOF) functionality is not supported
            for T1 or T2 instances.

    Engine
        The name of the cache engine to be used for this cache cluster.
        Valid values for this parameter are: memcached | redis

    EngineVersion
        The version number of the cache engine to be used for this cache
        cluster. To view the supported cache engine versions, use the
        DescribeCacheEngineVersions operation.

        .. note::
            You can upgrade to a newer engine version but you cannot
            downgrade to an earlier engine version. If you want to use an
            earlier engine version, you must delete the existing cache
            cluster or replication group and create it anew with the
            earlier engine version.

    CacheParameterGroupName
        The name of the parameter group to associate with this cache
        cluster. If this argument is omitted, the default parameter group
        for the specified engine is used. You cannot use any parameter
        group which has cluster-enabled='yes' when creating a cluster.

    CacheSubnetGroupName
        The name of the Cache Subnet Group to be used for the cache
        cluster. Use this parameter ONLY when you are creating a cache
        cluster within a VPC.

        .. note::
            If you're going to launch your cluster in an Amazon VPC, you
            need to create a subnet group before you start creating a
            cluster.

    CacheSecurityGroupNames
        A list of Cache Security Group names to associate with this cache
        cluster. Use this parameter ONLY when you are creating a cache
        cluster outside of a VPC.

    SecurityGroupIds
        One or more VPC security groups associated with the cache cluster.
        Use this parameter ONLY when you are creating a cache cluster
        within a VPC.

    Tags
        A list of tags to be added to this resource. Note that due to
        shortcomings in the AWS API for Elasticache, these can only be set
        during resource creation - later modification is not (currently)
        supported.

    SnapshotArns
        A single-element string list containing an Amazon Resource Name
        (ARN) that uniquely identifies a Redis RDB snapshot file stored in
        Amazon S3. The snapshot file is used to populate the node group
        (shard). The Amazon S3 object name in the ARN cannot contain any
        commas.

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    SnapshotName
        The name of a Redis snapshot from which to restore data into the
        new node group (shard). The snapshot status changes to restoring
        while the new node group (shard) is being created.

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    PreferredMaintenanceWindow
        Specifies the weekly time range during which maintenance on the
        cache cluster is permitted. It is specified as a range in the
        format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum
        maintenance window is a 60 minute period.

        Valid values for ddd are: sun, mon, tue, wed, thu, fri, sat

        Example: sun:23:00-mon:01:30

    Port
        The port number on which each of the cache nodes accepts
        connections.

        Default: 6379

    NotificationTopicArn
        The Amazon Resource Name (ARN) of the Amazon Simple Notification
        Service (SNS) topic to which notifications are sent.

        .. note::
            The Amazon SNS topic owner must be the same as the cache
            cluster owner.

    AutoMinorVersionUpgrade
        This (boolean) parameter is currently disabled.

    SnapshotRetentionLimit
        The number of days for which ElastiCache retains automatic
        snapshots before deleting them.

        Default: 0 (i.e., automatic backups are disabled for this cache
        cluster).

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    SnapshotWindow
        The daily time range (in UTC) during which ElastiCache begins
        taking a daily snapshot of your node group (shard). If you do not
        specify this parameter, ElastiCache automatically chooses an
        appropriate time range.

        Example: 05:00-09:00

        .. note::
            This parameter is ONLY valid if the Engine parameter is redis.

    AuthToken
        The password used to access a password protected server.

        Password constraints:

        - Must be only printable ASCII characters.
        - Must be at least 16 characters and no more than 128 characters
          in length.
        - Cannot contain any of the following characters: '/', '"',
          or "@".

    CacheNodeIdsToRemove
        A list of cache node IDs to be removed. A node ID is a numeric
        identifier (0001, 0002, etc.). This parameter is only valid when
        NumCacheNodes is less than the existing number of cache nodes. The
        number of cache node IDs supplied in this parameter must match the
        difference between the existing number of cache nodes in the
        cluster or pending cache nodes, whichever is greater, and the
        value of NumCacheNodes in the request.

    NewAvailabilityZones
        The list of Availability Zones where the new Memcached cache nodes
        are created. This parameter is only valid when NumCacheNodes in
        the request is greater than the sum of the number of active cache
        nodes and the number of cache nodes pending creation (which may be
        zero). The number of Availability Zones supplied in this list must
        match the cache nodes being added in this request.

        .. note::
            This option is only supported on Memcached clusters.

    NotificationTopicStatus
        The status of the SNS notification topic. Notifications are sent
        only if the status is active.

        Valid values: active | inactive

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])

    current = __salt__['boto3_elasticache.'
                       'describe_cache_clusters'](name, region=region, key=key,
                                                  keyid=keyid, profile=profile)
    if current:
        check_update = True
    else:
        check_update = False
        only_on_modify = [
            'CacheNodeIdsToRemove',
            'NewAvailabilityZones',
            'NotificationTopicStatus'
        ]
        create_args = {}
        for k, v in args.items():
            if k in only_on_modify:
                check_update = True
            else:
                create_args[k] = v
        if __opts__['test']:
            ret['comment'] = 'Cache cluster {0} would be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto3_elasticache.'
                           'create_cache_cluster'](name, wait=wait,
                                                   security_groups=security_groups,
                                                   region=region, key=key,
                                                   keyid=keyid, profile=profile,
                                                   **create_args)
        if created:
            new = __salt__['boto3_elasticache.'
                           'describe_cache_clusters'](name, region=region,
                                                      key=key, keyid=keyid,
                                                      profile=profile)
            ret['comment'] = 'Cache cluster {0} was created.'.format(name)
            ret['changes']['old'] = None
            ret['changes']['new'] = new[0]
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to create {0} cache cluster.'.format(name)

    if check_update:
        # Refresh this in case we're updating from 'only_on_modify' above...
        updated = __salt__['boto3_elasticache.'
                           'describe_cache_clusters'](name, region=region,
                                                      key=key, keyid=keyid,
                                                      profile=profile)
        need_update = _diff_cache_cluster(updated['CacheClusters'][0], args)
        if need_update:
            if __opts__['test']:
                ret['comment'] = 'Cache cluster {0} would be modified.'.format(name)
                ret['result'] = None
                return ret
            modified = __salt__['boto3_elasticache.'
                                'modify_cache_cluster'](name, wait=wait,
                                                        security_groups=security_groups,
                                                        region=region, key=key,
                                                        keyid=keyid, profile=profile,
                                                        **need_update)
            if modified:
                new = __salt__['boto3_elasticache.'
                               'describe_cache_clusters'](name, region=region,
                                                          key=key, keyid=keyid,
                                                          profile=profile)
                if ret['comment']:  # 'create' just ran...
                    ret['comment'] += ' ... and then immediately modified.'
                else:
                    ret['comment'] = 'Cache cluster {0} was modified.'.format(name)
                ret['changes']['old'] = current
                ret['changes']['new'] = new[0]
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to modify cache cluster {0}.'.format(name)
        else:
            ret['comment'] = 'Cache cluster {0} is in the desired state.'.format(name)
    return ret
def freeze(value):
    """Cast value to its frozen counterpart."""
    if isinstance(value, list):
        return FrozenList(*value)
    if isinstance(value, dict):
        return FrozenDict(**value)
    return value
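A quick usage sketch, assuming FrozenList and FrozenDict are the immutable wrappers defined alongside this helper:

fl = freeze([1, 2, 3])      # -> FrozenList(1, 2, 3)
fd = freeze({'a': 1})       # -> FrozenDict(a=1)
s = freeze('plain string')  # non-list/dict values pass through unchanged
# Note: only the top level is converted; nested containers stay mutable.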
def combine_calls(*args):
    """Combine multiple callsets into a final set of merged calls.
    """
    if len(args) == 3:
        is_cwl = False
        batch_id, samples, data = args
        caller_names, vrn_files = _organize_variants(samples, batch_id)
    else:
        is_cwl = True
        samples = [utils.to_single_data(x) for x in args]
        samples = [cwlutils.unpack_tarballs(x, x) for x in samples]
        data = samples[0]
        batch_id = data["batch_id"]
        caller_names = data["variants"]["variantcallers"]
        vrn_files = data["variants"]["calls"]
    logger.info("Ensemble consensus calls for {0}: {1}".format(
        batch_id, ",".join(caller_names)))
    edata = copy.deepcopy(data)
    base_dir = utils.safe_makedir(os.path.join(edata["dirs"]["work"],
                                               "ensemble", batch_id))
    if any([vcfutils.vcf_has_variants(f) for f in vrn_files]):
        # Decompose multiallelic variants and normalize
        passonly = not tz.get_in(["config", "algorithm", "ensemble",
                                  "use_filtered"], edata, False)
        vrn_files = [normalize.normalize(f, data, passonly=passonly,
                                         rerun_effects=False,
                                         remove_oldeffects=True,
                                         nonrefonly=True,
                                         work_dir=utils.safe_makedir(
                                             os.path.join(base_dir, c)))
                     for c, f in zip(caller_names, vrn_files)]
        if "classifiers" not in (dd.get_ensemble(edata) or {}):
            callinfo = _run_ensemble_intersection(batch_id, vrn_files,
                                                  caller_names, base_dir, edata)
        else:
            config_file = _write_config_file(batch_id, caller_names,
                                             base_dir, edata)
            callinfo = _run_ensemble(batch_id, vrn_files, config_file,
                                     base_dir, dd.get_ref_file(edata), edata)
            callinfo["vrn_file"] = vcfutils.bgzip_and_index(
                callinfo["vrn_file"], data["config"])
        # After decomposing multiallelic variants and normalizing,
        # re-evaluate effects
        ann_ma_file, _ = effects.add_to_vcf(callinfo["vrn_file"], data)
        if ann_ma_file:
            callinfo["vrn_file"] = ann_ma_file
        edata["config"]["algorithm"]["variantcaller"] = "ensemble"
        edata["vrn_file"] = callinfo["vrn_file"]
        edata["ensemble_bed"] = callinfo["bed_file"]
        callinfo["validate"] = validate.compare_to_rm(edata)[0][0].get("validate")
    else:
        out_vcf_file = os.path.join(base_dir, "{0}-ensemble.vcf".format(batch_id))
        vcfutils.write_empty_vcf(out_vcf_file,
                                 samples=[dd.get_sample_name(d) for d in samples])
        callinfo = {"variantcaller": "ensemble",
                    "vrn_file": vcfutils.bgzip_and_index(out_vcf_file,
                                                         data["config"]),
                    "bed_file": None}
    if is_cwl:
        callinfo["batch_samples"] = data["batch_samples"]
        callinfo["batch_id"] = batch_id
        return [{"ensemble": callinfo}]
    else:
        return [[batch_id, callinfo]]
def _sslobj(sock):
    """Returns the underlying PySSLSocket object with which the C
    extension functions interface.
    """
    if isinstance(sock._sslobj, _ssl._SSLSocket):
        return sock._sslobj
    else:
        return sock._sslobj._sslobj
def request_sensor_value(self, req, msg):
    """Request the value of a sensor or sensors.

    A list of sensor values as a sequence of #sensor-value informs.

    Parameters
    ----------
    name : str, optional
        Name of the sensor to poll (the default is to send values for
        all sensors). If name starts and ends with '/' it is treated as
        a regular expression and all sensors whose names contain the
        regular expression are returned.

    Informs
    -------
    timestamp : float
        Timestamp of the sensor reading in seconds since the Unix epoch,
        or milliseconds for katcp versions <= 4.
    count : {1}
        Number of sensors described in this #sensor-value inform. Will
        always be one. It exists to keep this inform compatible with
        #sensor-status.
    name : str
        Name of the sensor whose value is being reported.
    value : object
        Value of the named sensor. Type depends on the type of the
        sensor.

    Returns
    -------
    success : {'ok', 'fail'}
        Whether sending the list of values succeeded.
    informs : int
        Number of #sensor-value inform messages sent.

    Examples
    --------
    ::

        ?sensor-value
        #sensor-value 1244631611.415231 1 psu.voltage 4.5
        #sensor-value 1244631611.415200 1 cpu.status off
        ...
        !sensor-value ok 5

        ?sensor-value cpu.power.on
        #sensor-value 1244631611.415231 1 cpu.power.on 0
        !sensor-value ok 1
    """
    exact, name_filter = construct_name_filter(
        msg.arguments[0] if msg.arguments else None)
    sensors = [(name, sensor) for name, sensor in
               sorted(self._sensors.iteritems()) if name_filter(name)]

    if exact and not sensors:
        return req.make_reply("fail", "Unknown sensor name.")

    katcp_version = self.PROTOCOL_INFO.major
    for name, sensor in sensors:
        timestamp, status, value = sensor.read_formatted(katcp_version)
        req.inform(timestamp, "1", name, status, value)
    return req.make_reply("ok", str(len(sensors)))
def BIC(self, data=None):
    '''
    BIC on the passed data. If passed data is None (default),
    calculates BIC on the model's assigned data.
    '''
    # NOTE: in principle this method computes the BIC only after finding the
    # maximum likelihood parameters (or, of course, an EM fixed-point as an
    # approximation!)
    assert data is not None or len(self.states_list) > 0, \
        'Must have data to get BIC'
    if data is None:
        return -2*sum(self.log_likelihood(s.data).sum()
                      for s in self.states_list) + \
            self.num_parameters() * np.log(
                sum(s.data.shape[0] for s in self.states_list))
    else:
        return -2*self.log_likelihood(data) + \
            self.num_parameters() * np.log(data.shape[0])
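For reference, the quantity computed above is the standard Bayesian information criterion, BIC = -2 log L-hat + k log n, where L-hat is the maximized likelihood, k is the parameter count from num_parameters(), and n is the number of observations (rows of the passed data, or the summed rows of the assigned data).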
def deps_used(self, pkg, used):
    """Create the dependencies dictionary
    """
    if find_package(pkg + self.meta.sp, self.meta.pkg_path):
        if pkg not in self.deps_dict:
            self.deps_dict[pkg] = used
        else:
            self.deps_dict[pkg] += used
def Throughput(self):
    """Combined throughput from multiplying all the components together.

    Returns
    -------
    throughput : `~pysynphot.spectrum.TabularSpectralElement` or `None`
        Combined throughput.
    """
    try:
        throughput = spectrum.TabularSpectralElement()
        product = self._multiplyThroughputs(0)
        throughput._wavetable = product.GetWaveSet()
        throughput._throughputtable = product(throughput._wavetable)
        throughput.waveunits = product.waveunits
        throughput.name = '*'.join([str(x) for x in self.components])
        ## throughput = throughput.resample(spectrum._default_waveset)
        return throughput
    except IndexError:  # graph table is broken.
        return None
def clean(self):
    """Prevents cycles in the tree."""
    super(CTENode, self).clean()
    if self.parent and self.pk in getattr(self.parent, self._cte_node_path):
        raise ValidationError(
            _("A node cannot be made a descendant of itself."))
def synthesize(self, duration):
    """Synthesize white noise

    Args:
        duration (numpy.timedelta64): The duration of the synthesized sound
    """
    sr = self.samplerate.samples_per_second
    seconds = duration / Seconds(1)
    samples = np.random.uniform(low=-1., high=1., size=int(sr * seconds))
    return AudioSamples(samples, self.samplerate)
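A hedged usage sketch; the synthesizer and samplerate class names are assumptions inferred from the Seconds arithmetic above:

synth = NoiseSynthesizer(SR44100())   # assumed constructor taking a samplerate
noise = synth.synthesize(Seconds(2))  # two seconds of uniform white noise
print(noise.samplerate.samples_per_second)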
def int_global_to_local(self, index, axis=0):
    """Calculate local index from global index for integer input

    :param index: global index as integer
    :param axis: current axis to process
    :return: the local index, or None if the global index falls outside
        the non-halo part of the local mask
    """
    # Why >= here? Strictly, > should be sufficient -- needs a test!
    if index >= self.__mask[axis].stop - self.__halos[1][axis]:
        return None
    if index < self.__mask[axis].start + self.__halos[0][axis]:
        return None
    return index - self.__mask[axis].start
def format_number(col, d):
    """
    Formats the number X to a format like '#,###,###.##', rounded to d
    decimal places with HALF_EVEN round mode, and returns the result as
    a string.

    :param col: the column name of the numeric value to be formatted
    :param d: the N decimal places

    >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
    [Row(v=u'5.0000')]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
def cd_previous(self):
    """cd to the gDirectory before this file was open."""
    if self._prev_dir is None or isinstance(self._prev_dir, ROOT.TROOT):
        return False
    if isinstance(self._prev_dir, ROOT.TFile):
        if self._prev_dir.IsOpen() and self._prev_dir.IsWritable():
            self._prev_dir.cd()
            return True
        return False
    if not self._prev_dir.IsWritable():
        # avoid warning from ROOT stating file is not writable
        return False
    prev_file = self._prev_dir.GetFile()
    if prev_file and prev_file.IsOpen():
        self._prev_dir.cd()
        return True
    return False
cd to the gDirectory before this file was open.
def predict_proba(self, a, b, device=None):
    """Infer causal directions using the trained NCC pairwise model.

    Args:
        a (numpy.ndarray): Variable 1
        b (numpy.ndarray): Variable 2
        device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``)

    Returns:
        float: Causation score (Value : 1 if a->b and -1 if b->a)
    """
    device = SETTINGS.get_default(device=device)
    if self.model is None:
        raise ValueError('Model has to be trained before doing any predictions')
    if len(np.array(a).shape) == 1:
        a = np.array(a).reshape((-1, 1))
        b = np.array(b).reshape((-1, 1))
    m = np.hstack((a, b))
    m = scale(m)
    m = m.astype('float32')
    m = th.from_numpy(m).t().unsqueeze(0)

    if th.cuda.is_available():
        m = m.cuda()

    return (self.model(m).data.cpu().numpy()-.5) * 2
Infer causal directions using the trained NCC pairwise model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 device (str): Device to run the algorithm on (defaults to ``cdt.SETTINGS.default_device``) Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
def declaration_path(decl):
    """
    Returns a list of parent declaration names.

    Args:
        decl (declaration_t): declaration for which the declaration path
            should be calculated.

    Returns:
        list[(str | basestring)]: list of names, where the first item is the
        top parent name and the last item the inputted declaration name.

    """
    if not decl:
        return []
    if not decl.cache.declaration_path:
        result = [decl.name]
        parent = decl.parent
        while parent:
            if parent.cache.declaration_path:
                result.reverse()
                decl.cache.declaration_path = parent.cache.declaration_path + \
                    result
                return decl.cache.declaration_path
            else:
                result.append(parent.name)
                parent = parent.parent
        result.reverse()
        decl.cache.declaration_path = result
        return result
    return decl.cache.declaration_path
Returns a list of parent declaration names.

Args:
    decl (declaration_t): declaration for which the declaration path
        should be calculated.

Returns:
    list[(str | basestring)]: list of names, where the first item is the
    top parent name and the last item the inputted declaration name.
def find_recipients_conversations(self, context=None, exclude=None, from_conversation_id=None, permissions=None, search=None, type=None, user_id=None): """ Find recipients. Find valid recipients (users, courses and groups) that the current user can send messages to. The /api/v1/search/recipients path is the preferred endpoint, /api/v1/conversations/find_recipients is deprecated. Pagination is supported. """ path = {} data = {} params = {} # OPTIONAL - search """Search terms used for matching users/courses/groups (e.g. "bob smith"). If multiple terms are given (separated via whitespace), only results matching all terms will be returned.""" if search is not None: params["search"] = search # OPTIONAL - context """Limit the search to a particular course/group (e.g. "course_3" or "group_4").""" if context is not None: params["context"] = context # OPTIONAL - exclude """Array of ids to exclude from the search. These may be user ids or course/group ids prefixed with "course_" or "group_" respectively, e.g. exclude[]=1&exclude[]=2&exclude[]=course_3""" if exclude is not None: params["exclude"] = exclude # OPTIONAL - type """Limit the search just to users or contexts (groups/courses).""" if type is not None: self._validate_enum(type, ["user", "context"]) params["type"] = type # OPTIONAL - user_id """Search for a specific user id. This ignores the other above parameters, and will never return more than one result.""" if user_id is not None: params["user_id"] = user_id # OPTIONAL - from_conversation_id """When searching by user_id, only users that could be normally messaged by this user will be returned. This parameter allows you to specify a conversation that will be referenced for a shared context -- if both the current user and the searched user are in the conversation, the user will be returned. This is used to start new side conversations.""" if from_conversation_id is not None: params["from_conversation_id"] = from_conversation_id # OPTIONAL - permissions """Array of permission strings to be checked for each matched context (e.g. "send_messages"). This argument determines which permissions may be returned in the response; it won't prevent contexts from being returned if they don't grant the permission(s).""" if permissions is not None: params["permissions"] = permissions self.logger.debug("GET /api/v1/conversations/find_recipients with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/conversations/find_recipients".format(**path), data=data, params=params, no_data=True)
Find recipients. Find valid recipients (users, courses and groups) that the current user can send messages to. The /api/v1/search/recipients path is the preferred endpoint, /api/v1/conversations/find_recipients is deprecated. Pagination is supported.
def _get_full_model_smt_script(self, constraints=(), variables=()):
    """
    Returns a SMT script that declares all the symbols and constraints and
    checks their satisfiability (check-sat)

    :param variables: symbol declarations to include in the script
    :param constraints: constraints to evaluate in the scope of this call
    :return string: smt-lib representation of the script that checks the satisfiability
    """
    smt_script = '(set-logic ALL)\n'
    smt_script += '(set-option :produce-models true)\n'
    smt_script += self._smtlib_exprs(variables)
    smt_script += self._smtlib_exprs(constraints)
    smt_script += '(check-sat)\n'
    smt_script += '(get-model)\n'
    return smt_script
Returns a SMT script that declares all the symbols and constraints and
checks their satisfiability (check-sat)

:param variables: symbol declarations to include in the script
:param constraints: constraints to evaluate in the scope of this call
:return string: smt-lib representation of the script that checks the satisfiability
def frange(start, end, step): """Like range(), but with floats. """ val = start while val < end: yield val val += step
Like range(), but with floats.
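Typical usage, plus the endpoint caveat that comes with accumulating floats:

print(list(frange(0.0, 1.0, 0.25)))
# [0.0, 0.25, 0.5, 0.75]

# Accumulated floating-point error can leak an extra value past the endpoint:
print(len(list(frange(0.0, 1.0, 0.1))))
# 11 -- the tenth partial sum is 0.9999999999999999, which is still < 1.0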
def _get_line_styles(marker_str): """Return line style, color and marker type from specified marker string. For example, if ``marker_str`` is 'g-o' then the method returns ``('solid', 'green', 'circle')``. """ def _extract_marker_value(marker_str, code_dict): """Extracts the marker value from a given marker string. Looks up the `code_dict` and returns the corresponding marker for a specific code. For example if `marker_str` is 'g-o' then the method extracts - 'green' if the code_dict is color_codes, - 'circle' if the code_dict is marker_codes etc. """ val = None for code in code_dict: if code in marker_str: val = code_dict[code] break return val return [_extract_marker_value(marker_str, code_dict) for code_dict in [LINE_STYLE_CODES, COLOR_CODES, MARKER_CODES]]
Return line style, color and marker type from specified marker string. For example, if ``marker_str`` is 'g-o' then the method returns ``('solid', 'green', 'circle')``.
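A self-contained sketch of the lookup; the abbreviated code tables below are assumptions for illustration (the real module defines fuller LINE_STYLE_CODES, COLOR_CODES and MARKER_CODES dicts):

# Hypothetical, abbreviated code tables
LINE_STYLE_CODES = {'-': 'solid', '--': 'dashed'}
COLOR_CODES = {'g': 'green', 'r': 'red'}
MARKER_CODES = {'o': 'circle', 's': 'square'}

def get_line_styles(marker_str):
    def extract(code_dict):
        # The first code found as a substring of the marker string wins
        for code, value in code_dict.items():
            if code in marker_str:
                return value
        return None
    return [extract(d) for d in (LINE_STYLE_CODES, COLOR_CODES, MARKER_CODES)]

print(get_line_styles('g-o'))  # ['solid', 'green', 'circle']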
def get_coordinates_xyz(filename):
    """
    Get coordinates from filename and return a vectorset with all the
    coordinates, in XYZ format.

    Parameters
    ----------
    filename : string
        Filename to read

    Returns
    -------
    atoms : list
        List of atomic types
    V : array
        (N,3) where N is number of atoms
    """
    f = open(filename, 'r')
    V = list()
    atoms = list()
    n_atoms = 0

    # Read the first line to obtain the number of atoms to read
    try:
        n_atoms = int(f.readline())
    except ValueError:
        exit("error: Could not obtain the number of atoms in the .xyz file.")

    # Skip the title line
    f.readline()

    # Use the number of atoms to not read beyond the end of a file
    for lines_read, line in enumerate(f):

        if lines_read == n_atoms:
            break

        atom = re.findall(r'[a-zA-Z]+', line)[0]
        atom = atom.upper()

        numbers = re.findall(r'[-]?\d+\.\d*(?:[Ee][-\+]\d+)?', line)
        numbers = [float(number) for number in numbers]

        # The numbers are not valid unless we obtain at least three
        if len(numbers) >= 3:
            V.append(np.array(numbers)[:3])
            atoms.append(atom)
        else:
            exit("Reading the .xyz file failed in line {0}. Please check the format.".format(lines_read + 2))

    f.close()
    atoms = np.array(atoms)
    V = np.array(V)
    return atoms, V
Get coordinates from filename and return a vectorset with all the coordinates, in XYZ format. Parameters ---------- filename : string Filename to read Returns ------- atoms : list List of atomic types V : array (N,3) where N is number of atoms
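Assuming the function above is in scope (with `re` imported and `numpy as np`), a minimal round trip on a two-atom file:

xyz_text = (
    "2\n"
    "water fragment (title line, skipped)\n"
    "O   0.000  0.000  0.117\n"
    "H   0.000  0.757 -0.471\n"
)
with open('tiny.xyz', 'w') as fh:
    fh.write(xyz_text)

atoms, V = get_coordinates_xyz('tiny.xyz')
print(atoms)    # ['O' 'H']
print(V.shape)  # (2, 3)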
def setExpandedIcon( self, column, icon ): """ Sets the icon to be used when the item is expanded. :param column | <int> icon | <QtGui.QIcon> || None """ self._expandedIcon[column] = QtGui.QIcon(icon)
Sets the icon to be used when the item is expanded. :param column | <int> icon | <QtGui.QIcon> || None
def get_checking_block(self): """Workaround for parity https://github.com/paritytech/parity-ethereum/issues/9707 In parity doing any call() with the 'pending' block no longer falls back to the latest if no pending block is found but throws a mistaken error. Until that bug is fixed we need to enforce special behaviour for parity and use the latest block for checking. """ checking_block = 'pending' if self.eth_node is constants.EthClient.PARITY: checking_block = 'latest' return checking_block
Workaround for parity https://github.com/paritytech/parity-ethereum/issues/9707 In parity doing any call() with the 'pending' block no longer falls back to the latest if no pending block is found but throws a mistaken error. Until that bug is fixed we need to enforce special behaviour for parity and use the latest block for checking.
def pause(vm_): ''' Pause the named vm CLI Example: .. code-block:: bash salt '*' virt.pause <vm name> ''' with _get_xapi_session() as xapi: vm_uuid = _get_label_uuid(xapi, 'VM', vm_) if vm_uuid is False: return False try: xapi.VM.pause(vm_uuid) return True except Exception: return False
Pause the named vm CLI Example: .. code-block:: bash salt '*' virt.pause <vm name>
def ensure_dir_exists(directory):
    """Ensure that a directory exists."""
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
Ensure that a directory exists.
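On Python 3 the same guard collapses to `exist_ok=True`, which also avoids the race between the existence check and the creation:

import os

def ensure_dir_exists(directory):
    if directory:
        os.makedirs(directory, exist_ok=True)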
def setitem_via_pathlist(ol, value, pathlist):
    '''
        from elist.elist import *
        y = ['a', ['b', ['bb']], 'c']
        y[1][1]
        # ['bb']
        setitem_via_pathlist(y, "500", [1, 1])
        y
        # ['a', ['b', '500'], 'c']
    '''
    this = ol
    for i in range(len(pathlist) - 1):
        this = this[pathlist[i]]
    this[pathlist[-1]] = value
    return ol
from elist.elist import *
y = ['a', ['b', ['bb']], 'c']
y[1][1]
# ['bb']
setitem_via_pathlist(y, "500", [1, 1])
y
# ['a', ['b', '500'], 'c']
def connect(reactor, control_endpoint=None, password_function=None):
    """
    Creates a :class:`txtorcon.Tor` instance by connecting to an
    already-running tor's control port. For example, common defaults
    tor uses are UNIXClientEndpoint(reactor, '/var/run/tor/control') or
    TCP4ClientEndpoint(reactor, 'localhost', 9051)

    If only password authentication is available in the tor we connect
    to, the ``password_function`` is called (if supplied) to retrieve a
    valid password. This function can return a Deferred.

    For example::

        import txtorcon
        from twisted.internet.task import react
        from twisted.internet.defer import inlineCallbacks

        @inlineCallbacks
        def main(reactor):
            tor = yield txtorcon.connect(
                TCP4ClientEndpoint(reactor, "localhost", 9051)
            )
            state = yield tor.create_state()
            for circuit in state.circuits:
                print(circuit)

    :param control_endpoint: None, an IStreamClientEndpoint to connect
        to, or a Sequence of IStreamClientEndpoint instances to connect
        to. If None, a list of defaults are tried.

    :param password_function:
        See :class:`txtorcon.TorControlProtocol`

    :return:
        a Deferred that fires with a :class:`txtorcon.Tor` instance
    """

    @inlineCallbacks
    def try_endpoint(control_ep):
        assert IStreamClientEndpoint.providedBy(control_ep)
        proto = yield control_ep.connect(
            TorProtocolFactory(
                password_function=password_function
            )
        )
        config = yield TorConfig.from_protocol(proto)
        tor = Tor(reactor, proto, _tor_config=config)
        returnValue(tor)

    if control_endpoint is None:
        to_try = [
            UNIXClientEndpoint(reactor, '/var/run/tor/control'),
            TCP4ClientEndpoint(reactor, '127.0.0.1', 9051),
            TCP4ClientEndpoint(reactor, '127.0.0.1', 9151),
        ]
    elif IStreamClientEndpoint.providedBy(control_endpoint):
        to_try = [control_endpoint]
    elif isinstance(control_endpoint, Sequence):
        to_try = control_endpoint
        for ep in control_endpoint:
            if not IStreamClientEndpoint.providedBy(ep):
                raise ValueError(
                    "For control_endpoint=, '{}' must provide"
                    " IStreamClientEndpoint".format(ep)
                )
    else:
        raise ValueError(
            "For control_endpoint=, '{}' must provide"
            " IStreamClientEndpoint".format(control_endpoint)
        )

    errors = []
    for idx, ep in enumerate(to_try):
        try:
            tor = yield try_endpoint(ep)
            txtorlog.msg("Connected via '{}'".format(ep))
            returnValue(tor)
        except Exception as e:
            errors.append(e)
    if len(errors) == 1:
        raise errors[0]
    raise RuntimeError(
        'Failed to connect to: {}'.format(
            ', '.join(
                '{}: {}'.format(ep, err) for ep, err in zip(to_try, errors)
            )
        )
    )
Creates a :class:`txtorcon.Tor` instance by connecting to an
already-running tor's control port. For example, common defaults
tor uses are UNIXClientEndpoint(reactor, '/var/run/tor/control') or
TCP4ClientEndpoint(reactor, 'localhost', 9051)

If only password authentication is available in the tor we connect
to, the ``password_function`` is called (if supplied) to retrieve a
valid password. This function can return a Deferred.

For example::

    import txtorcon
    from twisted.internet.task import react
    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def main(reactor):
        tor = yield txtorcon.connect(
            TCP4ClientEndpoint(reactor, "localhost", 9051)
        )
        state = yield tor.create_state()
        for circuit in state.circuits:
            print(circuit)

:param control_endpoint: None, an IStreamClientEndpoint to connect
    to, or a Sequence of IStreamClientEndpoint instances to connect
    to. If None, a list of defaults are tried.

:param password_function:
    See :class:`txtorcon.TorControlProtocol`

:return:
    a Deferred that fires with a :class:`txtorcon.Tor` instance
def add_rule(self, ip_protocol, from_port, to_port, src_group_name, src_group_owner_id, cidr_ip): """ Add a rule to the SecurityGroup object. Note that this method only changes the local version of the object. No information is sent to EC2. """ rule = IPPermissions(self) rule.ip_protocol = ip_protocol rule.from_port = from_port rule.to_port = to_port self.rules.append(rule) rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)
Add a rule to the SecurityGroup object. Note that this method only changes the local version of the object. No information is sent to EC2.
def check_column_existence(col_name, df, presence=True):
    """
    Checks whether or not `col_name` is in `df` and raises a helpful error msg
    if the desired condition is not met.

    Parameters
    ----------
    col_name : str.
        Should represent a column whose presence in `df` is to be checked.
    df : pandas DataFrame.
        The dataframe that will be checked for the presence of `col_name`.
    presence : bool, optional.
        If True, then this function checks for the PRESENCE of `col_name` in
        `df`. If False, then this function checks for the ABSENCE of
        `col_name` in `df`. Default == True.

    Returns
    -------
    None.
    """
    if presence:
        if col_name not in df.columns:
            msg = "Ensure that `{}` is in `df.columns`."
            raise ValueError(msg.format(col_name))
    else:
        if col_name in df.columns:
            msg = "Ensure that `{}` is not in `df.columns`."
            raise ValueError(msg.format(col_name))
    return None
Checks whether or not `col_name` is in `df` and raises a helpful error msg
if the desired condition is not met.

Parameters
----------
col_name : str.
    Should represent a column whose presence in `df` is to be checked.
df : pandas DataFrame.
    The dataframe that will be checked for the presence of `col_name`.
presence : bool, optional.
    If True, then this function checks for the PRESENCE of `col_name` in
    `df`. If False, then this function checks for the ABSENCE of
    `col_name` in `df`. Default == True.

Returns
-------
None.
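Assuming the function above is in scope, typical usage with a small frame:

import pandas as pd

df = pd.DataFrame({'fare': [1.5, 2.0]})
check_column_existence('fare', df)      # passes silently
try:
    check_column_existence('toll', df)  # 'toll' is absent
except ValueError as err:
    print(err)  # Ensure that `toll` is in `df.columns`.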
def set_reload_params(
        self, min_lifetime=None, max_lifetime=None,
        max_requests=None, max_requests_delta=None,
        max_addr_space=None, max_rss=None, max_uss=None, max_pss=None,
        max_addr_space_forced=None, max_rss_forced=None,
        watch_interval_forced=None, mercy=None):
    """Sets workers reload parameters.

    :param int min_lifetime: A worker cannot be destroyed/reloaded unless it has been alive
        for N seconds (default 60). This is an anti-fork-bomb measure.
        Since 1.9

    :param int max_lifetime: Reload workers after this many seconds. Disabled by default.
        Since 1.9

    :param int max_requests: Reload workers after the specified amount of managed
        requests (avoid memory leaks).
        When a worker reaches this number of requests it will get recycled (killed and restarted).
        You can use this option to "dumb fight" memory leaks.

        Also take a look at the ``reload-on-as`` and ``reload-on-rss`` options
        as they are more useful for memory leaks.

        .. warning:: The default min-worker-lifetime 60 seconds takes priority over `max-requests`.

        Do not use with benchmarking as you'll get stalls
        such as `worker respawning too fast !!! i have to sleep a bit (2 seconds)...`

    :param int max_requests_delta: Add (worker_id * delta) to the max_requests value of each worker.

    :param int max_addr_space: Reload a worker if its address space usage is higher
        than the specified value in megabytes.

    :param int max_rss: Reload a worker if its physical unshared memory (resident set size) is higher
        than the specified value (in megabytes).

    :param int max_uss: Reload a worker if Unique Set Size is higher
        than the specified value in megabytes.

        .. note:: Linux only.

    :param int max_pss: Reload a worker if Proportional Set Size is higher
        than the specified value in megabytes.

        .. note:: Linux only.

    :param int max_addr_space_forced: Force the master to reload a worker
        if its address space is higher than specified megabytes (in megabytes).

    :param int max_rss_forced: Force the master to reload a worker
        if its resident set size memory is higher than specified in megabytes.

    :param int watch_interval_forced: The memory collector [per-worker] thread memory watch
        interval (seconds) used for forced reloads. Default: 3.

    :param int mercy: Set the maximum time (in seconds) a worker can take
        before reload/shutdown. Default: 60.

    """
    self._set('max-requests', max_requests)
    self._set('max-requests-delta', max_requests_delta)
    self._set('min-worker-lifetime', min_lifetime)
    self._set('max-worker-lifetime', max_lifetime)

    self._set('reload-on-as', max_addr_space)
    self._set('reload-on-rss', max_rss)
    self._set('reload-on-uss', max_uss)
    self._set('reload-on-pss', max_pss)

    self._set('evil-reload-on-as', max_addr_space_forced)
    self._set('evil-reload-on-rss', max_rss_forced)
    self._set('mem-collector-freq', watch_interval_forced)

    self._set('worker-reload-mercy', mercy)

    return self._section
Sets workers reload parameters.

:param int min_lifetime: A worker cannot be destroyed/reloaded unless it has been alive
    for N seconds (default 60). This is an anti-fork-bomb measure.
    Since 1.9

:param int max_lifetime: Reload workers after this many seconds. Disabled by default.
    Since 1.9

:param int max_requests: Reload workers after the specified amount of managed
    requests (avoid memory leaks).
    When a worker reaches this number of requests it will get recycled (killed and restarted).
    You can use this option to "dumb fight" memory leaks.

    Also take a look at the ``reload-on-as`` and ``reload-on-rss`` options
    as they are more useful for memory leaks.

    .. warning:: The default min-worker-lifetime 60 seconds takes priority over `max-requests`.

    Do not use with benchmarking as you'll get stalls
    such as `worker respawning too fast !!! i have to sleep a bit (2 seconds)...`

:param int max_requests_delta: Add (worker_id * delta) to the max_requests value of each worker.

:param int max_addr_space: Reload a worker if its address space usage is higher
    than the specified value in megabytes.

:param int max_rss: Reload a worker if its physical unshared memory (resident set size) is higher
    than the specified value (in megabytes).

:param int max_uss: Reload a worker if Unique Set Size is higher
    than the specified value in megabytes.

    .. note:: Linux only.

:param int max_pss: Reload a worker if Proportional Set Size is higher
    than the specified value in megabytes.

    .. note:: Linux only.

:param int max_addr_space_forced: Force the master to reload a worker
    if its address space is higher than specified megabytes (in megabytes).

:param int max_rss_forced: Force the master to reload a worker
    if its resident set size memory is higher than specified in megabytes.

:param int watch_interval_forced: The memory collector [per-worker] thread memory watch
    interval (seconds) used for forced reloads. Default: 3.

:param int mercy: Set the maximum time (in seconds) a worker can take
    before reload/shutdown. Default: 60.
def set_callbacks(self, **dic_functions):
    """Register callbacks needed by the interface object"""
    for action in self.interface.CALLBACKS:
        try:
            f = dic_functions[action]
        except KeyError:
            pass
        else:
            setattr(self.interface.callbacks, action, f)
    missing = [a for a in self.interface.CALLBACKS
               if a not in dic_functions]
    if not missing:
        logging.debug(
            f"{self.__class__.__name__} : All requested callbacks are provided.")
    else:
        logging.warning(
            f"{self.__class__.__name__} didn't set asked callbacks {missing}")
Register callbacks needed by the interface object
def clear_file(self, label): """stub""" rm = self.my_osid_object_form._get_provider_manager('REPOSITORY') catalog_id_str = '' if 'assignedBankIds' in self.my_osid_object_form._my_map: catalog_id_str = self.my_osid_object_form._my_map['assignedBankIds'][0] elif 'assignedRepositoryIds' in self.my_osid_object_form._my_map: catalog_id_str = self.my_osid_object_form._my_map['assignedRepositoryIds'][0] try: try: aas = rm.get_asset_admin_session_for_repository( Id(catalog_id_str), self.my_osid_object_form._proxy) except NullArgument: aas = rm.get_asset_admin_session_for_repository( Id(catalog_id_str)) except AttributeError: # for update forms try: aas = rm.get_asset_admin_session_for_repository( Id(catalog_id_str), self.my_osid_object_form._proxy) except NullArgument: aas = rm.get_asset_admin_session_for_repository( Id(catalog_id_str)) if label not in self.my_osid_object_form._my_map['fileIds']: raise NotFound() aas.delete_asset(Id(self.my_osid_object_form._my_map['fileIds'][label]['assetId'])) del self.my_osid_object_form._my_map['fileIds'][label]
stub
def summed_probabilities(self, choosers, alternatives):
    """
    Calculate total probability associated with each alternative.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table describing the agents making choices, e.g. households.
    alternatives : pandas.DataFrame
        Table describing the things from which agents are choosing.

    Returns
    -------
    probs : pandas.Series
        Total probability associated with each alternative.

    """
    def normalize(s):
        return s / s.sum()

    choosers, alternatives = self.apply_predict_filters(
        choosers, alternatives)
    probs = self.probabilities(choosers, alternatives, filter_tables=False)

    # group by the alternatives ID and sum
    if self.probability_mode == 'single_chooser':
        return (
            normalize(probs) * len(choosers)
        ).reset_index(level=0, drop=True)
    elif self.probability_mode == 'full_product':
        return probs.groupby(level=0).apply(normalize)\
            .groupby(level=1).sum()
    else:
        raise ValueError(
            'Unrecognized probability_mode option: {}'.format(
                self.probability_mode))
Calculate total probability associated with each alternative. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Total probability associated with each alternative.
def king(self, color: Color) -> Optional[Square]: """ Finds the king square of the given side. Returns ``None`` if there is no king of that color. In variants with king promotions, only non-promoted kings are considered. """ king_mask = self.occupied_co[color] & self.kings & ~self.promoted return msb(king_mask) if king_mask else None
Finds the king square of the given side. Returns ``None`` if there is no king of that color. In variants with king promotions, only non-promoted kings are considered.
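The surrounding names (`occupied_co`, `kings`, `promoted`, `msb`) suggest this is python-chess's `Board.king`; if so, typical usage looks like:

import chess

board = chess.Board()
square = board.king(chess.WHITE)
print(square)                     # 4
print(chess.square_name(square))  # 'e1'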
def check_activation(self, contacts): """Enter or exit downtime if necessary :return: None """ now = time.time() was_is_in_effect = self.is_in_effect self.is_in_effect = (self.start_time <= now <= self.end_time) # Raise a log entry when we get in the downtime if not was_is_in_effect and self.is_in_effect: self.enter(contacts) # Same for exit purpose if was_is_in_effect and not self.is_in_effect: self.exit(contacts)
Enter or exit downtime if necessary :return: None
def socket(type, filename, line_nbr): """ Get a new ZMQ socket, automagically creating a ZMQ context if this is the first time. Caller is responsible for destroying the ZMQ socket before process exits, to avoid a ZMQ deadlock. Note: you should not use this method in CZMQ apps, use zsock_new() instead. *** This is for CZMQ internal use only and may change arbitrarily *** """ return c_void_p(lib.zsys_socket(type, filename, line_nbr))
Get a new ZMQ socket, automagically creating a ZMQ context if this is the first time. Caller is responsible for destroying the ZMQ socket before process exits, to avoid a ZMQ deadlock. Note: you should not use this method in CZMQ apps, use zsock_new() instead. *** This is for CZMQ internal use only and may change arbitrarily ***
def encode(cls, value):
    """Encodes a value into bencoded bytes.

    :param value: Python object to be encoded (str, int, list, dict).
        Strings are encoded using UTF-8.
    :rtype: bytes
    """
    val_encoding = 'utf-8'

    def encode_str(v):
        try:
            v_enc = encode(v, val_encoding)
        except UnicodeDecodeError:
            if PY3:
                raise
            else:  # Suppose bytestring
                v_enc = v

        prefix = encode('%s:' % len(v_enc), val_encoding)

        return prefix + v_enc

    def encode_(val):

        if isinstance(val, str_type):
            result = encode_str(val)

        elif isinstance(val, int_types):
            result = encode(('i%se' % val), val_encoding)

        elif isinstance(val, (list, set, tuple)):
            result = encode('l', val_encoding)
            for item in val:
                result += encode_(item)
            result += encode('e', val_encoding)

        elif isinstance(val, dict):
            result = encode('d', val_encoding)
            # Dictionaries are expected to be sorted by key.
            for k, v in OrderedDict(sorted(val.items(), key=itemgetter(0))).items():
                result += (encode_str(k) + encode_(v))
            result += encode('e', val_encoding)

        elif isinstance(val, byte_types):
            result = encode('%s:' % len(val), val_encoding)
            result += val

        else:
            raise BencodeEncodingError('Unable to encode `%s` %s' % (type(val), val))

        return result

    return encode_(value)
Encodes a value into bencoded bytes.

:param value: Python object to be encoded (str, int, list, dict).
    Strings are encoded using UTF-8.
:rtype: bytes
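A minimal standalone sketch of the same encoding rules for the common cases, without the Python-2 compatibility shims of the original:

def bencode(val):
    if isinstance(val, str):
        val = val.encode('utf-8')
    if isinstance(val, bytes):
        return str(len(val)).encode() + b':' + val
    if isinstance(val, int):
        return b'i' + str(val).encode() + b'e'
    if isinstance(val, (list, tuple)):
        return b'l' + b''.join(bencode(v) for v in val) + b'e'
    if isinstance(val, dict):
        # Dictionary keys must be emitted in sorted order
        return b'd' + b''.join(
            bencode(k) + bencode(v) for k, v in sorted(val.items())) + b'e'
    raise TypeError('cannot bencode %r' % type(val))

print(bencode({'spam': ['a', 'b'], 'count': 3}))
# b'd5:counti3e4:spaml1:a1:bee'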
def register(self, name, description, obj, plugin): """ Registers a new shared object. :param name: Unique name for shared object :type name: str :param description: Description of shared object :type description: str :param obj: The object, which shall be shared :type obj: any type :param plugin: Plugin, which registers the new shared object """ if name in self._shared_objects.keys(): raise SharedObjectExistException("Shared Object %s already registered by %s" % (name, self._shared_objects[name].plugin.name)) new_shared_object = SharedObject(name, description, obj, plugin) self._shared_objects[name] = new_shared_object self.log.debug("Shared object registered: %s" % name) return new_shared_object
Registers a new shared object. :param name: Unique name for shared object :type name: str :param description: Description of shared object :type description: str :param obj: The object, which shall be shared :type obj: any type :param plugin: Plugin, which registers the new shared object
def get_context_data(self, **kwargs): """Tests cookies. """ self.request.session.set_test_cookie() if not self.request.session.test_cookie_worked(): messages.add_message( self.request, messages.ERROR, "Please enable cookies.") self.request.session.delete_test_cookie() return super().get_context_data(**kwargs)
Tests cookies.
def parse(cls, data): """ Parse a Config structure out of a Python dict (that's likely deserialized from YAML). :param data: Config-y dict :type data: dict :return: Config object :rtype: valohai_yaml.objs.Config """ parsers = { 'step': ([], Step.parse), 'endpoint': ([], Endpoint.parse), } for datum in data: assert isinstance(datum, dict) for type, (items, parse) in parsers.items(): if type in datum: items.append(parse(datum[type])) break else: raise ValueError('No parser for {0}'.format(datum)) inst = cls( steps=parsers['step'][0], endpoints=parsers['endpoint'][0], ) inst._original_data = data return inst
Parse a Config structure out of a Python dict (that's likely deserialized from YAML). :param data: Config-y dict :type data: dict :return: Config object :rtype: valohai_yaml.objs.Config
def inv_diagonal(S):
    """
    Computes the inverse of a diagonal NxN np.array S. In general this will
    be much faster than calling np.linalg.inv().

    However, does NOT check if the off diagonal elements are non-zero. So long
    as S is truly diagonal, the output is identical to np.linalg.inv().

    Parameters
    ----------
    S : np.array
        diagonal NxN array to take inverse of

    Returns
    -------
    S_inv : np.array
        inverse of S

    Examples
    --------

    This is meant to be used as a replacement inverse function for the
    KalmanFilter class when you know the system covariance S is diagonal.
    It just makes the filter run faster; the output is unchanged.

    >>> kf = KalmanFilter(dim_x=3, dim_z=1)
    >>> kf.inv = inv_diagonal  # S is 1x1, so safely diagonal
    """
    S = np.asarray(S)

    if S.ndim != 2 or S.shape[0] != S.shape[1]:
        raise ValueError('S must be a square Matrix')

    si = np.zeros(S.shape)
    for i in range(len(S)):
        si[i, i] = 1. / S[i, i]
    return si
Computes the inverse of a diagonal NxN np.array S. In general this will
be much faster than calling np.linalg.inv().

However, does NOT check if the off diagonal elements are non-zero. So long
as S is truly diagonal, the output is identical to np.linalg.inv().

Parameters
----------
S : np.array
    diagonal NxN array to take inverse of

Returns
-------
S_inv : np.array
    inverse of S

Examples
--------

This is meant to be used as a replacement inverse function for the
KalmanFilter class when you know the system covariance S is diagonal.
It just makes the filter run faster; the output is unchanged.

>>> kf = KalmanFilter(dim_x=3, dim_z=1)
>>> kf.inv = inv_diagonal  # S is 1x1, so safely diagonal
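Assuming the function above is in scope, a quick check against np.linalg.inv():

import numpy as np

S = np.diag([2.0, 4.0, 5.0])
print(inv_diagonal(S))
# [[0.5   0.    0.  ]
#  [0.    0.25  0.  ]
#  [0.    0.    0.2 ]]
np.testing.assert_allclose(inv_diagonal(S), np.linalg.inv(S))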
def inspect_container(self, container): """ Identical to the `docker inspect` command, but only for containers. Args: container (str): The container to inspect Returns: (dict): Similar to the output of `docker inspect`, but as a single dict Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ return self._result( self._get(self._url("/containers/{0}/json", container)), True )
Identical to the `docker inspect` command, but only for containers. Args: container (str): The container to inspect Returns: (dict): Similar to the output of `docker inspect`, but as a single dict Raises: :py:class:`docker.errors.APIError` If the server returns an error.
def solve_status_str(hdrlbl, fmtmap=None, fwdth0=4, fwdthdlt=6, fprec=2): """Construct header and format details for status display of an iterative solver. Parameters ---------- hdrlbl : tuple of strings Tuple of field header strings fmtmap : dict or None, optional (default None) A dict providing a mapping from field header strings to print format strings, providing a mechanism for fields with print formats that depart from the standard format fwdth0 : int, optional (default 4) Number of characters in first field formatted for integers fwdthdlt : int, optional (default 6) The width of fields formatted for floats is the sum of the value of this parameter and the field precision fprec : int, optional (default 2) Precision of fields formatted for floats Returns ------- hdrstr : string Complete header string fmtstr : string Complete print formatting string for numeric values nsep : integer Number of characters in separator string """ if fmtmap is None: fmtmap = {} fwdthn = fprec + fwdthdlt # Construct a list specifying the format string for each field. # Use format string from fmtmap if specified, otherwise use # a %d specifier with field width fwdth0 for the first field, # or a %e specifier with field width fwdthn and precision # fprec fldfmt = [fmtmap[lbl] if lbl in fmtmap else (('%%%dd' % (fwdth0)) if idx == 0 else (('%%%d.%de' % (fwdthn, fprec)))) for idx, lbl in enumerate(hdrlbl)] fmtstr = (' ').join(fldfmt) # Construct a list of field widths for each field by extracting # field widths from field format strings cre = re.compile(r'%-?(\d+)') fldwid = [] for fmt in fldfmt: mtch = cre.match(fmt) if mtch is None: raise ValueError("Format string '%s' does not contain field " "width" % fmt) else: fldwid.append(int(mtch.group(1))) # Construct list of field header strings formatted to the # appropriate field width, and join to construct a combined field # header string hdrlst = [('%-*s' % (w, t)) for t, w in zip(hdrlbl, fldwid)] hdrstr = (' ').join(hdrlst) return hdrstr, fmtstr, len(hdrstr)
Construct header and format details for status display of an iterative solver. Parameters ---------- hdrlbl : tuple of strings Tuple of field header strings fmtmap : dict or None, optional (default None) A dict providing a mapping from field header strings to print format strings, providing a mechanism for fields with print formats that depart from the standard format fwdth0 : int, optional (default 4) Number of characters in first field formatted for integers fwdthdlt : int, optional (default 6) The width of fields formatted for floats is the sum of the value of this parameter and the field precision fprec : int, optional (default 2) Precision of fields formatted for floats Returns ------- hdrstr : string Complete header string fmtstr : string Complete print formatting string for numeric values nsep : integer Number of characters in separator string
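Assuming the function above is importable (it only needs `re`), a small usage sketch:

hdrstr, fmtstr, nsep = solve_status_str(('Itn', 'Fnc', 'DFid'))
print(hdrstr)                            # aligned column headers
print('-' * nsep)                        # separator of matching width
print(fmtstr % (0, 1.23456e-1, 2.3e-2))  # one formatted status row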
def history(self, first=0, last=0, limit=-1, only_ops=[], exclude_ops=[]):
    """ Returns a generator for individual account transactions. The
        latest operation will be first. This call can be used in a
        ``for`` loop.

        :param int first: sequence number of the first
            transaction to return (*optional*)
        :param int last: sequence number of the last
            transaction to return (*optional*)
        :param int limit: limit number of transactions to
            return (*optional*)
        :param array only_ops: Limit generator by these
            operations (*optional*)
        :param array exclude_ops: Exclude these operations from
            generator (*optional*).

        .. note:: only_ops and exclude_ops take an array of strings:
            The full list of operation ID's can be found in
            operationids.py.
            Example: ['transfer', 'fill_order']
    """
    _limit = 100
    cnt = 0

    if first < 0:
        first = 0

    while True:
        # RPC call
        txs = self.blockchain.rpc.get_account_history(
            self["id"],
            "1.11.{}".format(last),
            _limit,
            "1.11.{}".format(first - 1),
            api="history",
        )
        for i in txs:
            if (
                exclude_ops
                and self.operations.getOperationNameForId(i["op"][0])
                in exclude_ops
            ):
                continue
            if (
                not only_ops
                or self.operations.getOperationNameForId(i["op"][0])
                in only_ops
            ):
                cnt += 1
                yield i
                if limit >= 0 and cnt >= limit:  # pragma: no cover
                    return

        if not txs:
            log.info("No more history returned from API node")
            break

        if len(txs) < _limit:
            log.info("Less than {} have been returned.".format(_limit))
            break
        first = int(txs[-1]["id"].split(".")[2])
Returns a generator for individual account transactions. The
latest operation will be first. This call can be used in a
``for`` loop.

:param int first: sequence number of the first
    transaction to return (*optional*)
:param int last: sequence number of the last
    transaction to return (*optional*)
:param int limit: limit number of transactions to
    return (*optional*)
:param array only_ops: Limit generator by these
    operations (*optional*)
:param array exclude_ops: Exclude these operations from
    generator (*optional*).

.. note:: only_ops and exclude_ops take an array of strings:
    The full list of operation ID's can be found in
    operationids.py.
    Example: ['transfer', 'fill_order']
def is_builtin(text): """Test if passed string is the name of a Python builtin object""" from spyder.py3compat import builtins return text in [str(name) for name in dir(builtins) if not name.startswith('_')]
Test if passed string is the name of a Python builtin object
def get_config(): ''' Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.get_config ''' cmd = 'Get-DscConfiguration | Select-Object * -ExcludeProperty Cim*' try: raw_config = _pshell(cmd, ignore_retcode=True) except CommandExecutionError as exc: if 'Current configuration does not exist' in exc.info['stderr']: raise CommandExecutionError('Not Configured') raise config = dict() if raw_config: # Get DSC Configuration Name if 'ConfigurationName' in raw_config[0]: config[raw_config[0]['ConfigurationName']] = {} # Add all DSC Configurations by ResourceId for item in raw_config: config[item['ConfigurationName']][item['ResourceId']] = {} for key in item: if key not in ['ConfigurationName', 'ResourceId']: config[item['ConfigurationName']][item['ResourceId']][key] = item[key] return config
Get the current DSC Configuration Returns: dict: A dictionary representing the DSC Configuration on the machine Raises: CommandExecutionError: On failure CLI Example: .. code-block:: bash salt '*' dsc.get_config
def _get_generic_two_antidep_episodes_result( rowdata: Tuple[Any, ...] = None) -> DataFrame: """ Create a results row for this application. """ # Valid data types... see: # - pandas.core.dtypes.common.pandas_dtype # - https://pandas.pydata.org/pandas-docs/stable/timeseries.html # - https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.datetime.html data = [rowdata] if rowdata else [] return DataFrame(array( data, # data dtype=[ # column definitions: (RCN_PATIENT_ID, DTYPE_STRING), (RCN_DRUG_A_NAME, DTYPE_STRING), (RCN_DRUG_A_FIRST_MENTION, DTYPE_DATE), (RCN_DRUG_A_SECOND_MENTION, DTYPE_DATE), (RCN_DRUG_B_NAME, DTYPE_STRING), (RCN_DRUG_B_FIRST_MENTION, DTYPE_DATE), (RCN_DRUG_B_SECOND_MENTION, DTYPE_DATE), (RCN_EXPECT_RESPONSE_BY_DATE, DTYPE_DATE), (RCN_END_OF_SYMPTOM_PERIOD, DTYPE_DATE), ] ))
Create a results row for this application.
def vcsNodeState_originator_switch_info_switchIdentifier(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") vcsNodeState = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs") originator_switch_info = ET.SubElement(vcsNodeState, "originator-switch-info") switchIdentifier = ET.SubElement(originator_switch_info, "switchIdentifier") switchIdentifier.text = kwargs.pop('switchIdentifier') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def _kalman_prediction_step_SVD(k, p_m, p_P, p_dyn_model_callable,
                                calc_grad_log_likelihood=False,
                                p_dm=None, p_dP=None):
    """
    Discrete prediction function

    Input:
        k: int
            Iteration No. Starts at 0. Total number of iterations equals the
            number of measurements.

        p_m: matrix of size (state_dim, time_series_no)
            Mean value from the previous step. For "multiple time series
            mode" it is a matrix, the second dimension of which corresponds
            to different time series.

        p_P: tuple (Prev_cov, S, V)
            Covariance matrix from the previous step and its SVD
            decomposition. Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V)

        p_dyn_model_callable: object

        calc_grad_log_likelihood: boolean
            Whether to calculate gradient of the marginal likelihood
            of the state-space model. If true then the next parameter must
            provide the extra parameters for gradient calculation.

        p_dm: 3D array (state_dim, time_series_no, parameters_no)
            Mean derivatives from the previous step. For "multiple time
            series mode" it is a 3D array, the second dimension of which
            corresponds to different time series.

        p_dP: 3D array (state_dim, state_dim, parameters_no)
            Covariance derivatives from the previous step.

    Output:
    ----------------------------
    m_pred, P_pred, dm_pred, dP_pred: matrices, 3D objects
        Results of the prediction steps.
    """
    # covariance from the previous step and its SVD decomposition
    # Prev_cov = V * S * V.T
    Prev_cov, S_old, V_old = p_P
    #p_prev_cov_tst = np.dot(p_V, (p_S * p_V).T) # reconstructed covariance from the previous step

    # index correspond to values from previous iteration.
    A = p_dyn_model_callable.Ak(k, p_m, Prev_cov)  # state transition matrix (or Jacobian)
    Q = p_dyn_model_callable.Qk(k)  # state noise matrix. This is necessary for the square root calculation (next step)
    Q_sr = p_dyn_model_callable.Q_srk(k)
    # Prediction step ->
    m_pred = p_dyn_model_callable.f_a(k, p_m, A)  # predicted mean

    # covariance prediction has changed:
    svd_1_matr = np.vstack(((np.sqrt(S_old) * np.dot(A, V_old)).T, Q_sr.T))
    (U, S, Vh) = sp.linalg.svd(svd_1_matr, full_matrices=False,
                               compute_uv=True, overwrite_a=False,
                               check_finite=True)

    # predicted variance computed by the regular method. For testing
    #P_pred_tst = A.dot(Prev_cov).dot(A.T) + Q

    V_new = Vh.T
    S_new = S**2

    P_pred = np.dot(V_new * S_new, V_new.T)  # prediction covariance
    P_pred = (P_pred, S_new, Vh.T)
    # Prediction step <-

    # derivatives
    if calc_grad_log_likelihood:
        dA_all_params = p_dyn_model_callable.dAk(k)  # derivatives of A wrt parameters
        dQ_all_params = p_dyn_model_callable.dQk(k)  # derivatives of Q wrt parameters

        param_number = p_dP.shape[2]

        # p_dm, p_dP - derivatives from the previous step
        dm_pred = np.empty(p_dm.shape)
        dP_pred = np.empty(p_dP.shape)

        for j in range(param_number):
            dA = dA_all_params[:, :, j]
            dQ = dQ_all_params[:, :, j]

            #dP = p_dP[:,:,j]
            #dm = p_dm[:,:,j]
            dm_pred[:, :, j] = np.dot(dA, p_m) + np.dot(A, p_dm[:, :, j])
            # prediction step derivatives for current parameter:

            dP_pred[:, :, j] = np.dot(dA, np.dot(Prev_cov, A.T))
            dP_pred[:, :, j] += dP_pred[:, :, j].T
            dP_pred[:, :, j] += np.dot(A, np.dot(p_dP[:, :, j], A.T)) + dQ

            dP_pred[:, :, j] = 0.5 * (dP_pred[:, :, j] + dP_pred[:, :, j].T)  # symmetrize
    else:
        dm_pred = None
        dP_pred = None

    return m_pred, P_pred, dm_pred, dP_pred
Discrete prediction function

Input:
    k: int
        Iteration No. Starts at 0. Total number of iterations equals the
        number of measurements.

    p_m: matrix of size (state_dim, time_series_no)
        Mean value from the previous step. For "multiple time series mode"
        it is a matrix, the second dimension of which corresponds to
        different time series.

    p_P: tuple (Prev_cov, S, V)
        Covariance matrix from the previous step and its SVD decomposition.
        Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V)

    p_dyn_model_callable: object

    calc_grad_log_likelihood: boolean
        Whether to calculate gradient of the marginal likelihood
        of the state-space model. If true then the next parameter must
        provide the extra parameters for gradient calculation.

    p_dm: 3D array (state_dim, time_series_no, parameters_no)
        Mean derivatives from the previous step. For "multiple time series
        mode" it is a 3D array, the second dimension of which corresponds
        to different time series.

    p_dP: 3D array (state_dim, state_dim, parameters_no)
        Covariance derivatives from the previous step.

Output:
----------------------------
m_pred, P_pred, dm_pred, dP_pred: matrices, 3D objects
    Results of the prediction steps.
def connect_signals(self, target): """ This is deprecated. Pass your controller to connect signals the old way. """ if self.connected: raise RuntimeError("GtkBuilder can only connect signals once") self.builder.connect_signals(target) self.connected = True
This is deprecated. Pass your controller to connect signals the old way.
def init_printing(*, reset=False, init_sympy=True, **kwargs):
    """Initialize the printing system.

    This determines the behavior of the :func:`ascii`, :func:`unicode`,
    and :func:`latex` functions, as well as the ``__str__`` and ``__repr__``
    of any :class:`.Expression`.

    The routine may be called in one of two forms. First,

    ::

        init_printing(
            str_format=<str_fmt>, repr_format=<repr_fmt>,
            caching=<use_caching>, **settings)

    provides a simplified, "manual" setup with the following parameters.

    Args:
        str_format (str): Format for ``__str__`` representation of an
            :class:`.Expression`. One of 'ascii', 'unicode', 'latex',
            'srepr', 'indsrepr' ("indented `srepr`"), or 'tree'. The string
            representation will be affected by the settings for the
            corresponding print routine, e.g. :func:`unicode` for
            ``str_format='unicode'``
        repr_format (str): Like `str_format`, but for ``__repr__``. This is
            what gets displayed in an interactive (I)Python session.
        caching (bool): By default, the printing functions (:func:`ascii`,
            :func:`unicode`, :func:`latex`) cache their result for any
            expression and sub-expression. This is both for efficiency and
            to give the ability to supply custom strings for subexpressions
            by passing a `cache` parameter to the printing functions.
            Initializing the printing system with ``caching=False`` disables
            this possibility.
        settings: Any setting understood by any of the printing routines.

    Second,

    ::

        init_printing(inifile=<path_to_file>)

    allows for more detailed settings through a config file, see the
    :ref:`notes on using an INI file <ini_file_printing>`.

    If `str_format` or `repr_format` are not given, they will be set to
    'unicode' if the current terminal is known to support an UTF8
    (according to ``sys.stdout.encoding``), and 'ascii' otherwise.

    Generally, :func:`init_printing` should be called only once at the
    beginning of a script or notebook. If it is called multiple times, any
    settings accumulate. To avoid this and to reset the printing system to
    the defaults, you may pass ``reset=True``. In a Jupyter notebook,
    expressions are rendered graphically via LaTeX, using the settings as
    they affect the :func:`latex` printer.

    The :func:`sympy.init_printing()` routine is called automatically,
    unless `init_sympy` is given as ``False``.

    See also:
        :func:`configure_printing` allows to temporarily change the
        printing system from what was configured in :func:`init_printing`.
    """
    # return either None (default) or a dict of frozen attributes if
    # ``_freeze=True`` is given as a keyword argument (internal use in
    # `configure_printing` only)
    logger = logging.getLogger(__name__)
    if reset:
        SympyPrinter._global_settings = {}
    if init_sympy:
        if kwargs.get('repr_format', '') == 'unicode':
            sympy_init_printing(use_unicode=True)
        elif kwargs.get('repr_format', '') == 'ascii':
            sympy_init_printing(use_unicode=False)
        else:
            sympy_init_printing()  # let sympy decide by itself
    if 'inifile' in kwargs:
        invalid_kwargs = False
        if '_freeze' in kwargs:
            _freeze = kwargs['_freeze']
            if len(kwargs) != 2:
                invalid_kwargs = True
        else:
            _freeze = False
            if len(kwargs) != 1:
                invalid_kwargs = True
        if invalid_kwargs:
            raise TypeError(
                "The `inifile` argument cannot be combined with any "
                "other keyword arguments")
        logger.debug(
            "Initializing printing from INI file %s", kwargs['inifile'])
        return _init_printing_from_file(kwargs['inifile'], _freeze=_freeze)
    else:
        logger.debug(
            "Initializing printing with direct settings: %s", repr(kwargs))
        return _init_printing(**kwargs)
Initialize the printing system.

This determines the behavior of the :func:`ascii`, :func:`unicode`,
and :func:`latex` functions, as well as the ``__str__`` and ``__repr__``
of any :class:`.Expression`.

The routine may be called in one of two forms. First,

::

    init_printing(
        str_format=<str_fmt>, repr_format=<repr_fmt>,
        caching=<use_caching>, **settings)

provides a simplified, "manual" setup with the following parameters.

Args:
    str_format (str): Format for ``__str__`` representation of an
        :class:`.Expression`. One of 'ascii', 'unicode', 'latex', 'srepr',
        'indsrepr' ("indented `srepr`"), or 'tree'. The string
        representation will be affected by the settings for the
        corresponding print routine, e.g. :func:`unicode` for
        ``str_format='unicode'``
    repr_format (str): Like `str_format`, but for ``__repr__``. This is
        what gets displayed in an interactive (I)Python session.
    caching (bool): By default, the printing functions (:func:`ascii`,
        :func:`unicode`, :func:`latex`) cache their result for any
        expression and sub-expression. This is both for efficiency and to
        give the ability to supply custom strings for subexpressions by
        passing a `cache` parameter to the printing functions.
        Initializing the printing system with ``caching=False`` disables
        this possibility.
    settings: Any setting understood by any of the printing routines.

Second,

::

    init_printing(inifile=<path_to_file>)

allows for more detailed settings through a config file, see the
:ref:`notes on using an INI file <ini_file_printing>`.

If `str_format` or `repr_format` are not given, they will be set to
'unicode' if the current terminal is known to support an UTF8 (according
to ``sys.stdout.encoding``), and 'ascii' otherwise.

Generally, :func:`init_printing` should be called only once at the
beginning of a script or notebook. If it is called multiple times, any
settings accumulate. To avoid this and to reset the printing system to
the defaults, you may pass ``reset=True``. In a Jupyter notebook,
expressions are rendered graphically via LaTeX, using the settings as
they affect the :func:`latex` printer.

The :func:`sympy.init_printing()` routine is called automatically, unless
`init_sympy` is given as ``False``.

See also:
    :func:`configure_printing` allows to temporarily change the printing
    system from what was configured in :func:`init_printing`.
def _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, scale:float=1.35) -> Iterator[List[Tensor]]: "Computes the outputs for several augmented inputs for TTA" dl = learn.dl(ds_type) ds = dl.dataset old = ds.tfms augm_tfm = [o for o in learn.data.train_ds.tfms if o.tfm not in (crop_pad, flip_lr, dihedral, zoom)] try: pbar = master_bar(range(8)) for i in pbar: row = 1 if i&1 else 0 col = 1 if i&2 else 0 flip = i&4 d = {'row_pct':row, 'col_pct':col, 'is_random':False} tfm = [*augm_tfm, zoom(scale=scale, **d), crop_pad(**d)] if flip: tfm.append(flip_lr(p=1.)) ds.tfms = tfm yield get_preds(learn.model, dl, pbar=pbar, activ=_loss_func2activ(learn.loss_func))[0] finally: ds.tfms = old
Computes the outputs for several augmented inputs for TTA
def rotateX(self, angle): """ Rotates the point around the X axis by the given angle in degrees. """ rad = angle * math.pi / 180 cosa = math.cos(rad) sina = math.sin(rad) y = self.y * cosa - self.z * sina z = self.y * sina + self.z * cosa return Point3D(self.x, y, z)
Rotates the point around the X axis by the given angle in degrees.
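The same rotation written as a matrix product, with NumPy standing in for the Point3D class:

import math

import numpy as np

def rotate_x(point, angle_degrees):
    rad = math.radians(angle_degrees)
    c, s = math.cos(rad), math.sin(rad)
    # Rows implement y' = y*cos - z*sin and z' = y*sin + z*cos
    R = np.array([[1.0, 0.0, 0.0],
                  [0.0, c, -s],
                  [0.0, s, c]])
    return R @ np.asarray(point, dtype=float)

print(rotate_x([0.0, 1.0, 0.0], 90))  # ~[0, 0, 1]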
def get_assessment_part_form(self, *args, **kwargs):
    """Pass through to provider AssessmentPartAdminSession.get_assessment_part_form_for_create or .get_assessment_part_form_for_update"""
    # This method might be a bit sketchy. Time will tell.
    if isinstance(args[-1], list) or 'assessment_part_record_types' in kwargs:
        return self.get_assessment_part_form_for_create(*args, **kwargs)
    else:
        return self.get_assessment_part_form_for_update(*args, **kwargs)
Pass through to provider AssessmentPartAdminSession.get_assessment_part_form_for_create or .get_assessment_part_form_for_update
def _times_to_hours_after_local_midnight(times): """convert local pandas datetime indices to array of hours as floats""" times = times.tz_localize(None) hrs = 1 / NS_PER_HR * ( times.astype(np.int64) - times.normalize().astype(np.int64)) return np.array(hrs)
convert local pandas datetime indices to array of hours as floats
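A self-contained sketch; NS_PER_HR is assumed to be the module constant for nanoseconds per hour:

import numpy as np
import pandas as pd

NS_PER_HR = 3600 * 10**9  # assumed: nanoseconds per hour

times = pd.DatetimeIndex(['2019-06-01 06:30', '2019-06-01 18:45'],
                         tz='US/Pacific')
times = times.tz_localize(None)
hrs = (times.astype(np.int64) - times.normalize().astype(np.int64)) / NS_PER_HR
print(np.array(hrs))  # [ 6.5  18.75]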
def assert_inbounds(num, low, high, msg='', eq=False, verbose=not util_arg.QUIET): r""" Args: num (scalar): low (scalar): high (scalar): msg (str): """ from utool import util_str if util_arg.NO_ASSERTS: return passed = util_alg.inbounds(num, low, high, eq=eq) if isinstance(passed, np.ndarray): passflag = np.all(passed) else: passflag = passed if not passflag: failednum = num.compress(~passed) if isinstance(num, np.ndarray) else num failedlow = low.compress(~passed) if isinstance(low, np.ndarray) else low failedhigh = high.compress(~passed) if isinstance(high, np.ndarray) else high msg_ = 'num=%r is out of bounds=(%r, %r)' % (failednum, failedlow, failedhigh) raise AssertionError(msg_ + '\n' + msg) else: if verbose: op = '<=' if eq else '<' fmtstr = 'Passed assert_inbounds: {low} {op} {num} {op} {high}' print(fmtstr.format(low=low, op=op, num=util_str.truncate_str(str(num)), high=high))
r""" Args: num (scalar): low (scalar): high (scalar): msg (str):
def from_coordinates(cls, coordinates, labels): """Initialize a similarity descriptor Arguments: coordinates -- a Nx3 numpy array labels -- a list with integer labels used to identify atoms of the same type """ from molmod.ext import molecules_distance_matrix distance_matrix = molecules_distance_matrix(coordinates) return cls(distance_matrix, labels)
Initialize a similarity descriptor Arguments: coordinates -- a Nx3 numpy array labels -- a list with integer labels used to identify atoms of the same type
def v(msg, *args, **kwargs):
    '''
    Log a message at verbose level.
    '''
    return logging.log(VERBOSE, msg, *args, **kwargs)
Log a message at verbose level.
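`VERBOSE` is a custom level defined elsewhere in the module; a plausible setup (the numeric value 15, between DEBUG=10 and INFO=20, is an assumption):

import logging

VERBOSE = 15  # assumed value, between DEBUG (10) and INFO (20)
logging.addLevelName(VERBOSE, 'VERBOSE')

def v(msg, *args, **kwargs):
    return logging.log(VERBOSE, msg, *args, **kwargs)

logging.basicConfig(level=VERBOSE)
v('loaded %d records', 42)  # VERBOSE:root:loaded 42 records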
def add_response_signature(self, response_data, sign_algorithm=OneLogin_Saml2_Constants.RSA_SHA1): """ Builds the Signature of the SAML Response. :param response_data: The Response parameters :type response_data: dict :param sign_algorithm: Signature algorithm method :type sign_algorithm: string """ return self.__build_signature(response_data, 'SAMLResponse', sign_algorithm)
Builds the Signature of the SAML Response. :param response_data: The Response parameters :type response_data: dict :param sign_algorithm: Signature algorithm method :type sign_algorithm: string
def collect_mean(self, fitness_functions): """! @brief Stores average value of fitness function among chromosomes on specific iteration. @param[in] fitness_functions (float): Average value of fitness functions among chromosomes. """ if not self._need_mean_ff: return self._mean_ff_result.append(np.mean(fitness_functions))
! @brief Stores average value of fitness function among chromosomes on specific iteration. @param[in] fitness_functions (float): Average value of fitness functions among chromosomes.
def IncrementCounter(self, metric_name, delta=1, fields=None): """See base class.""" if delta < 0: raise ValueError("Invalid increment for counter: %d." % delta) self._counter_metrics[metric_name].Increment(delta, fields)
See base class.
def wait_for_event(event): """ Wraps a win32 event into a `Future` and wait for it. """ f = Future() def ready(): get_event_loop().remove_win32_handle(event) f.set_result(None) get_event_loop().add_win32_handle(event, ready) return f
Wraps a win32 event into a `Future` and wait for it.
def replicate_per_farm_dbs(cloud_url=None, local_url=None, farm_name=None):
    """
    Set up replication of the per-farm databases from the local server to
    the cloud server.

    :param str cloud_url: Used to override the cloud url from the global
        configuration in case the calling function is in the process of
        initializing the cloud server
    :param str local_url: Used to override the local url from the global
        configuration in case the calling function is in the process of
        initializing the local server
    :param str farm_name: Used to override the farm name from the global
        configuration in case the calling function is in the process of
        initializing the farm
    """
    cloud_url = cloud_url or config["cloud_server"]["url"]
    local_url = local_url or config["local_server"]["url"]
    farm_name = farm_name or config["cloud_server"]["farm_name"]
    username = config["cloud_server"]["username"]
    password = config["cloud_server"]["password"]

    # Add credentials to the cloud url
    parsed_cloud_url = urlparse(cloud_url)
    if not parsed_cloud_url.username:
        new_netloc = "{}:{}@{}".format(
            username, password, parsed_cloud_url.netloc
        )
        cloud_url = ParseResult(
            parsed_cloud_url.scheme, new_netloc, parsed_cloud_url.path,
            parsed_cloud_url.params, parsed_cloud_url.query,
            parsed_cloud_url.fragment
        ).geturl()
    server = Server(local_url)
    for db_name in per_farm_dbs:
        remote_db_name = "{}/{}/{}".format(username, farm_name, db_name)
        server.replicate(
            db_name, db_name, urljoin(cloud_url, remote_db_name),
            continuous=True
        )
Set up replication of the per-farm databases from the local server to
the cloud server.

:param str cloud_url: Used to override the cloud url from the global
    configuration in case the calling function is in the process of
    initializing the cloud server
:param str local_url: Used to override the local url from the global
    configuration in case the calling function is in the process of
    initializing the local server
:param str farm_name: Used to override the farm name from the global
    configuration in case the calling function is in the process of
    initializing the farm
def pull(configuration, *resources):
    """
    Pull translations from all languages listed in conf/locale/config.yaml
    where there is at least 3% reviewed translations.

    If arguments are provided, they are specific resources to pull.
    Otherwise, all resources are pulled.
    """
    print("Pulling conf/locale/config.yaml:locales from Transifex...")
    for lang in configuration.translated_locales:
        cmd = 'tx pull -f --mode=reviewed --minimum-perc=3 -l {lang}'.format(lang=lang)
        if resources:
            for resource in resources:
                execute(cmd + ' -r {resource}'.format(resource=resource))
        else:
            execute(cmd)
    clean_translated_locales(configuration)
Pull translations from all languages listed in conf/locale/config.yaml where there is at least 10% reviewed translations. If arguments are provided, they are specific resources to pull. Otherwise, all resources are pulled.
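A hypothetical invocation; the resource slug below is invented for illustration:

# Pull a single resource for every translated locale.
pull(configuration, 'edx-platform.django-partial')
# For each locale this shells out to, e.g.:
#   tx pull -f --mode=reviewed --minimum-perc=3 -l fr -r edx-platform.django-partial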
def execute_greenlet_async(func, *args, **kwargs):
    """
    Executes `func` in a separate greenlet within the same process, so
    memory and other resources (e.g. TCP connections) remain available
    to it. `args` and `kwargs` are passed through to `func`.
    """
    global _GREENLET_EXECUTOR
    if _GREENLET_EXECUTOR is None:
        _GREENLET_EXECUTOR = GreenletExecutor(
            num_greenlets=settings.node.greenlet_pool_size)
    return _GREENLET_EXECUTOR.submit(func, *args, **kwargs)
Executes `func` in a separate greenlet within the same process, so memory and other resources (e.g. TCP connections) remain available to it. `args` and `kwargs` are passed through to `func`.
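A usage sketch; it assumes the executor's submit() returns a gevent-style AsyncResult with a blocking get(), which is an assumption rather than something confirmed by the source:

def slow_lookup(key):
    return key.upper()

async_result = execute_greenlet_async(slow_lookup, 'value')
print(async_result.get())  # 'VALUE'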
def _set_bad_packet(self, v, load=False): """ Setter method for bad_packet, mapped from YANG variable /rbridge_id/router/ospf/log/bad_packet (container) If this variable is read-only (config: false) in the source YANG file, then _set_bad_packet is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bad_packet() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=bad_packet.bad_packet, is_container='container', presence=True, yang_name="bad-packet", rest_name="bad-packet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Logging Bad packets'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """bad_packet must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=bad_packet.bad_packet, is_container='container', presence=True, yang_name="bad-packet", rest_name="bad-packet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Logging Bad packets'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""", }) self.__bad_packet = t if hasattr(self, '_set'): self._set()
Setter method for bad_packet, mapped from YANG variable /rbridge_id/router/ospf/log/bad_packet (container) If this variable is read-only (config: false) in the source YANG file, then _set_bad_packet is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_bad_packet() directly.
def temp_dir(sub_dir='work'):
    """Obtain the temporary working directory for the operating system.

    An inasafe subdirectory will automatically be created under this and
    if specified, a user subdirectory under that.

    .. note:: You can use this together with unique_filename to create
       a file in a temporary directory under the inasafe workspace. e.g.

       tmpdir = temp_dir('testing')
       tmpfile = unique_filename(dir=tmpdir)
       print(tmpfile)
       /tmp/inasafe/2012-08-23/timlinux/testing/tmpMRpF_C

    If you specify INASAFE_WORK_DIR as an environment var, it will be
    used in preference to the system temp directory.

    :param sub_dir: Optional argument which will cause an additional
        subdirectory to be created e.g. /tmp/inasafe/foo/
    :type sub_dir: str

    :return: Path to the temp dir that is created.
    :rtype: str

    :raises: Any errors from the underlying system calls.
    """
    user = getpass.getuser().replace(' ', '_')
    current_date = date.today()
    date_string = current_date.isoformat()
    if 'INASAFE_WORK_DIR' in os.environ:
        new_directory = os.environ['INASAFE_WORK_DIR']
    else:
        # Following 4 lines are a workaround for tempfile.tempdir()
        # unreliability
        handle, filename = mkstemp()
        os.close(handle)
        new_directory = os.path.dirname(filename)
        os.remove(filename)

    path = os.path.join(new_directory, 'inasafe', date_string, user, sub_dir)

    if not os.path.exists(path):
        # Ensure that the dir is world writable
        # Umask sets the new mask and returns the old
        old_mask = os.umask(0000)
        os.makedirs(path, 0o777)
        # Reinstate the old mask for tmp
        os.umask(old_mask)
    return path
Obtain the temporary working directory for the operating system. An inasafe subdirectory will automatically be created under this and if specified, a user subdirectory under that. .. note:: You can use this together with unique_filename to create a file in a temporary directory under the inasafe workspace. e.g. tmpdir = temp_dir('testing') tmpfile = unique_filename(dir=tmpdir) print(tmpfile) /tmp/inasafe/2012-08-23/timlinux/testing/tmpMRpF_C If you specify INASAFE_WORK_DIR as an environment var, it will be used in preference to the system temp directory. :param sub_dir: Optional argument which will cause an additional subdirectory to be created e.g. /tmp/inasafe/foo/ :type sub_dir: str :return: Path to the temp dir that is created. :rtype: str :raises: Any errors from the underlying system calls.
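A short usage sketch; the resulting path depends on platform, date, and user:

# e.g. on Linux, for user 'timlinux' on 2012-08-23 this yields
# /tmp/inasafe/2012-08-23/timlinux/testing
work_dir = temp_dir('testing')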
def _track_change(self, name, value, formatter=None): """Track that a change happened. This function is only needed for manually recording changes that are not captured by changes to properties of this object that are tracked automatically. Classes that inherit from `emulation_mixin` should use this function to record interesting changes in their internal state or events that happen. The `value` parameter that you pass here should be a native python object best representing what the value of the property that changed is. When saved to disk, it will be converted to a string using: `str(value)`. If you do not like the string that would result from such a call, you can pass a custom formatter that will be called as `formatter(value)` and must return a string. Args: name (str): The name of the property that changed. value (object): The new value of the property. formatter (callable): Optional function to convert value to a string. This function will only be called if track_changes() is enabled and `name` is on the whitelist for properties that should be tracked. If `formatter` is not passed or is None, it will default to `str` """ self._emulation_log.track_change(self._emulation_address, name, value, formatter)
Track that a change happened. This function is only needed for manually recording changes that are not captured by changes to properties of this object that are tracked automatically. Classes that inherit from `emulation_mixin` should use this function to record interesting changes in their internal state or events that happen. The `value` parameter that you pass here should be a native python object best representing what the value of the property that changed is. When saved to disk, it will be converted to a string using: `str(value)`. If you do not like the string that would result from such a call, you can pass a custom formatter that will be called as `formatter(value)` and must return a string. Args: name (str): The name of the property that changed. value (object): The new value of the property. formatter (callable): Optional function to convert value to a string. This function will only be called if track_changes() is enabled and `name` is on the whitelist for properties that should be tracked. If `formatter` is not passed or is None, it will default to `str`
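An illustrative fragment from inside a hypothetical subclass of the emulation mixin; the device class, property name, and formatter are invented:

class DemoDevice(emulation_mixin):
    def set_voltage(self, volts):
        self.voltage = volts
        # Record the change with a human-readable string representation.
        self._track_change('voltage', volts,
                           formatter=lambda value: '%.1f V' % value)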
def view_vector(self, vector, viewup=None):
    """Point the camera in the direction of the given vector, looking at
    the center of the scene. If ``viewup`` is not given, the default from
    ``rcParams['camera']['viewup']`` is used."""
    focal_pt = self.center
    if viewup is None:
        viewup = rcParams['camera']['viewup']
    cpos = [vector + np.array(focal_pt), focal_pt, viewup]
    self.camera_position = cpos
    return self.reset_camera()
Point the camera in the direction of the given vector, looking at the center of the scene. If ``viewup`` is not given, the default from ``rcParams['camera']['viewup']`` is used.
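A usage sketch assuming this is pyvista's Plotter method; the mesh and view direction are arbitrary:

import numpy as np
import pyvista as pv

plotter = pv.Plotter()
plotter.add_mesh(pv.Sphere())
# Look along the (1, 1, 1) direction toward the scene center.
plotter.view_vector(np.array([1.0, 1.0, 1.0]))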
def get_sorted_proposal_list(self): """Return a list of `CodeAssistProposal`""" proposals = {} for proposal in self.proposals: proposals.setdefault(proposal.scope, []).append(proposal) result = [] for scope in self.scopepref: scope_proposals = proposals.get(scope, []) scope_proposals = [proposal for proposal in scope_proposals if proposal.type in self.typerank] scope_proposals.sort(key=self._proposal_key) result.extend(scope_proposals) return result
Return a list of `CodeAssistProposal`
def serialize_smarttag(ctx, document, el, root): "Serializes smarttag." if ctx.options['smarttag_span']: _span = etree.SubElement(root, 'span', {'class': 'smarttag', 'data-smarttag-element': el.element}) else: _span = root for elem in el.elements: _ser = ctx.get_serializer(elem) if _ser: _td = _ser(ctx, document, elem, _span) else: if isinstance(elem, doc.Text): children = list(_span) if len(children) == 0: _text = _span.text or u'' _span.text = u'{}{}'.format(_text, elem.text) else: _text = children[-1].tail or u'' children[-1].tail = u'{}{}'.format(_text, elem.text) fire_hooks(ctx, document, el, _span, ctx.get_hook('smarttag')) return root
Serializes smarttag.
def _build_error_report(
    self, message, report_location=None, http_context=None, user=None
):
    """Builds the Error Reporting object to report.

    This builds the object according to

    https://cloud.google.com/error-reporting/docs/formatting-error-messages

    :type message: str
    :param message: The stack trace that was reported or logged by the
                    service.

    :type report_location: dict
    :param report_location:  The location in the source code where the
        decision was made to report the error, usually the place
        where it was logged. For a logged exception this would be the
        source line where the exception is logged, usually close to
        the place where it was caught.

        This should be a Python dict that contains the keys 'filePath',
        'lineNumber', and 'functionName'

    :type http_context: :class:`google.cloud.error_reporting.HTTPContext`
    :param http_context: The HTTP request which was processed when the
                         error was triggered.

    :type user: str
    :param user: The user who caused or was affected by the crash. This
                 can be a user ID, an email address, or an arbitrary
                 token that uniquely identifies the user. When sending an
                 error report, leave this field empty if the user was
                 not logged in. In this case the Error Reporting system
                 will use other data, such as remote IP address,
                 to distinguish affected users.

    :rtype: dict
    :returns: A dict payload ready to be serialized to JSON and sent to
              the API.
    """
    payload = {
        "serviceContext": {"service": self.service},
        "message": "{0}".format(message),
    }

    if self.version:
        payload["serviceContext"]["version"] = self.version

    if report_location or http_context or user:
        payload["context"] = {}

    if report_location:
        payload["context"]["reportLocation"] = report_location

    if http_context:
        http_context_dict = http_context.__dict__
        # strip out None values
        payload["context"]["httpRequest"] = {
            key: value
            for key, value in six.iteritems(http_context_dict)
            if value is not None
        }

    if user:
        payload["context"]["user"] = user
    return payload
Builds the Error Reporting object to report. This builds the object according to https://cloud.google.com/error-reporting/docs/formatting-error-messages :type message: str :param message: The stack trace that was reported or logged by the service. :type report_location: dict :param report_location: The location in the source code where the decision was made to report the error, usually the place where it was logged. For a logged exception this would be the source line where the exception is logged, usually close to the place where it was caught. This should be a Python dict that contains the keys 'filePath', 'lineNumber', and 'functionName' :type http_context: :class:`google.cloud.error_reporting.HTTPContext` :param http_context: The HTTP request which was processed when the error was triggered. :type user: str :param user: The user who caused or was affected by the crash. This can be a user ID, an email address, or an arbitrary token that uniquely identifies the user. When sending an error report, leave this field empty if the user was not logged in. In this case the Error Reporting system will use other data, such as remote IP address, to distinguish affected users. :rtype: dict :returns: A dict payload ready to be serialized to JSON and sent to the API.
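An illustrative payload as it might come out for a hypothetical client with service='my-service' and version='1.0'; every concrete value below is invented:

payload = {
    'serviceContext': {'service': 'my-service', 'version': '1.0'},
    'message': 'Traceback (most recent call last): ...',
    'context': {
        'reportLocation': {
            'filePath': 'app.py',
            'lineNumber': 42,
            'functionName': 'handle_request',
        },
        'user': 'user-123',
    },
}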
def permute(sequence, permutation):
    """Apply a permutation sigma({j}) to an arbitrary sequence.

    :param sequence: Any finite length sequence ``[l_1,l_2,...l_n]``. If it is
        a list, tuple or str, the return type will be the same.
    :param permutation: permutation image tuple
    :type permutation: tuple
    :return: The permuted sequence ``[l_sigma(1), l_sigma(2), ..., l_sigma(n)]``
    :raise: BadPermutationError or ValueError
    """
    if len(sequence) != len(permutation):
        raise ValueError((sequence, permutation))
    if not check_permutation(permutation):
        raise BadPermutationError(str(permutation))
    permuted = [sequence[p] for p in permutation]
    # str(generator) would return the generator's repr rather than the
    # permuted characters, so strings must be rebuilt with ''.join().
    if isinstance(sequence, str):
        return ''.join(permuted)
    if isinstance(sequence, tuple):
        return tuple(permuted)
    return permuted
Apply a permutation sigma({j}) to an arbitrary sequence. :param sequence: Any finite length sequence ``[l_1,l_2,...l_n]``. If it is a list, tuple or str, the return type will be the same. :param permutation: permutation image tuple :type permutation: tuple :return: The permuted sequence ``[l_sigma(1), l_sigma(2), ..., l_sigma(n)]`` :raise: BadPermutationError or ValueError
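For example, assuming ``check_permutation`` accepts the image tuple:

permute(['a', 'b', 'c'], (2, 0, 1))   # ['c', 'a', 'b']
permute(('a', 'b', 'c'), (2, 0, 1))   # ('c', 'a', 'b')
permute('abc', (2, 0, 1))             # 'cab'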
def infer_modifications(stmts): """Return inferred Modification from RegulateActivity + ActiveForm. This function looks for combinations of Activation/Inhibition Statements and ActiveForm Statements that imply a Modification Statement. For example, if we know that A activates B, and phosphorylated B is active, then we can infer that A leads to the phosphorylation of B. An additional requirement when making this assumption is that the activity of B should only be dependent on the modified state and not other context - otherwise the inferred Modification is not necessarily warranted. Parameters ---------- stmts : list[indra.statements.Statement] A list of Statements to infer Modifications from. Returns ------- linked_stmts : list[indra.mechlinker.LinkedStatement] A list of LinkedStatements representing the inferred Statements. """ linked_stmts = [] for act_stmt in _get_statements_by_type(stmts, RegulateActivity): for af_stmt in _get_statements_by_type(stmts, ActiveForm): if not af_stmt.agent.entity_matches(act_stmt.obj): continue mods = af_stmt.agent.mods # Make sure the ActiveForm only involves modified sites if af_stmt.agent.mutations or \ af_stmt.agent.bound_conditions or \ af_stmt.agent.location: continue if not af_stmt.agent.mods: continue for mod in af_stmt.agent.mods: evs = act_stmt.evidence + af_stmt.evidence for ev in evs: ev.epistemics['direct'] = False if mod.is_modified: mod_type_name = mod.mod_type else: mod_type_name = modtype_to_inverse[mod.mod_type] mod_class = modtype_to_modclass[mod_type_name] if not mod_class: continue st = mod_class(act_stmt.subj, act_stmt.obj, mod.residue, mod.position, evidence=evs) ls = LinkedStatement([act_stmt, af_stmt], st) linked_stmts.append(ls) logger.info('inferred: %s' % st) return linked_stmts
Return inferred Modification from RegulateActivity + ActiveForm. This function looks for combinations of Activation/Inhibition Statements and ActiveForm Statements that imply a Modification Statement. For example, if we know that A activates B, and phosphorylated B is active, then we can infer that A leads to the phosphorylation of B. An additional requirement when making this assumption is that the activity of B should only be dependent on the modified state and not other context - otherwise the inferred Modification is not necessarily warranted. Parameters ---------- stmts : list[indra.statements.Statement] A list of Statements to infer Modifications from. Returns ------- linked_stmts : list[indra.mechlinker.LinkedStatement] A list of LinkedStatements representing the inferred Statements.
def put(self, thing_id='0', property_name=None): """ Handle a PUT request. thing_id -- ID of the thing this request is for property_name -- the name of the property from the URL path """ thing = self.get_thing(thing_id) if thing is None: self.set_status(404) return try: args = json.loads(self.request.body.decode()) except ValueError: self.set_status(400) return if property_name not in args: self.set_status(400) return if thing.has_property(property_name): try: thing.set_property(property_name, args[property_name]) except PropertyError: self.set_status(400) return self.set_header('Content-Type', 'application/json') self.write(json.dumps({ property_name: thing.get_property(property_name), })) else: self.set_status(404)
Handle a PUT request. thing_id -- ID of the thing this request is for property_name -- the name of the property from the URL path
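A client-side sketch using requests; the host, port, URL layout, and the 'brightness' property are assumptions about a particular deployment, not part of the handler itself:

import requests

resp = requests.put('http://localhost:8888/things/0/properties/brightness',
                    json={'brightness': 50})
print(resp.json())  # {'brightness': 50}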
def date(self, year: Number, month: Number, day: Number) -> Date: """ Takes three numbers and returns a ``Date`` object whose year, month, and day are the three numbers in that order. """ return Date(year, month, day)
Takes three numbers and returns a ``Date`` object whose year, month, and day are the three numbers in that order.
def resize(self, image, size):
    """
    Resizes the image

    :param image: The image object
    :param size: size is PIL tuple (width, height, force) ex: (200,100,True)
    """
    (width, height, force) = size
    if image.size[0] > width or image.size[1] > height:
        if force:
            return ImageOps.fit(image, (width, height), Image.ANTIALIAS)
        else:
            thumb = image.copy()
            thumb.thumbnail((width, height), Image.ANTIALIAS)
            return thumb
    return image
Resizes the image :param image: The image object :param size: size is PIL tuple (width, height, force) ex: (200,100,True)
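A usage sketch; 'photo.jpg' and the resizer instance are hypothetical:

from PIL import Image

img = Image.open('photo.jpg')
thumb = resizer.resize(img, (200, 100, False))   # fit within 200x100, keep ratio
cropped = resizer.resize(img, (200, 100, True))  # crop to exactly 200x100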
def get_summary(self): """ Get summary of ``SNPs``. Returns ------- dict summary info, else None if ``SNPs`` is not valid """ if not self.is_valid(): return None else: return { "source": self.source, "assembly": self.assembly, "build": self.build, "build_detected": self.build_detected, "snp_count": self.snp_count, "chromosomes": self.chromosomes_summary, "sex": self.sex, }
Get summary of ``SNPs``. Returns ------- dict summary info, else None if ``SNPs`` is not valid
def basic_reject(self, delivery_tag, requeue):
    """Reject an incoming message

    This method allows a client to reject a message. It can be
    used to interrupt and cancel large incoming messages, or
    return untreatable messages to their original queue.

    RULE:

        The server SHOULD be capable of accepting and processing
        the Reject method while sending message content with a
        Deliver or Get-Ok method. I.e. the server should read and
        process incoming methods while sending output frames. To
        cancel partially-sent content, the server sends a content
        body frame of size 1 (i.e. with no data except the
        frame-end octet).

    RULE:

        The server SHOULD interpret this method as meaning that
        the client is unable to process the message at this time.

    RULE:

        A client MUST NOT use this method as a means of selecting
        messages to process. A rejected message MAY be discarded
        or dead-lettered, not necessarily passed to another
        client.

    PARAMETERS:
        delivery_tag: longlong

            server-assigned delivery tag

            The server-assigned and channel-specific delivery tag

            RULE:

                The delivery tag is valid only within the channel
                from which the message was received. I.e. a client
                MUST NOT receive a message on one channel and then
                acknowledge it on another.

            RULE:

                The server MUST NOT use a zero value for delivery
                tags. Zero is reserved for client use, meaning
                "all messages so far received".

        requeue: boolean

            requeue the message

            If this field is False, the message will be discarded.
            If this field is True, the server will attempt to
            requeue the message.

            RULE:

                The server MUST NOT deliver the message to the
                same client within the context of the current
                channel. The recommended strategy is to attempt to
                deliver the message to an alternative consumer,
                and if that is not possible, to move the message
                to a dead-letter queue. The server MAY use more
                sophisticated tracking to hold the message on the
                queue and redeliver it to the same client at a
                later stage.
    """
    args = AMQPWriter()
    args.write_longlong(delivery_tag)
    args.write_bit(requeue)
    self._send_method((60, 90), args)
Reject an incoming message This method allows a client to reject a message. It can be used to interrupt and cancel large incoming messages, or return untreatable messages to their original queue. RULE: The server SHOULD be capable of accepting and processing the Reject method while sending message content with a Deliver or Get-Ok method. I.e. the server should read and process incoming methods while sending output frames. To cancel partially-sent content, the server sends a content body frame of size 1 (i.e. with no data except the frame-end octet). RULE: The server SHOULD interpret this method as meaning that the client is unable to process the message at this time. RULE: A client MUST NOT use this method as a means of selecting messages to process. A rejected message MAY be discarded or dead-lettered, not necessarily passed to another client. PARAMETERS: delivery_tag: longlong server-assigned delivery tag The server-assigned and channel-specific delivery tag RULE: The delivery tag is valid only within the channel from which the message was received. I.e. a client MUST NOT receive a message on one channel and then acknowledge it on another. RULE: The server MUST NOT use a zero value for delivery tags. Zero is reserved for client use, meaning "all messages so far received". requeue: boolean requeue the message If this field is False, the message will be discarded. If this field is True, the server will attempt to requeue the message. RULE: The server MUST NOT deliver the message to the same client within the context of the current channel. The recommended strategy is to attempt to deliver the message to an alternative consumer, and if that is not possible, to move the message to a dead-letter queue. The server MAY use more sophisticated tracking to hold the message on the queue and redeliver it to the same client at a later stage.
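A consumer sketch; `channel` is assumed to be an open amqplib Channel, while `process()` and `TemporaryFailure` are hypothetical application-level names:

def on_message(msg):
    try:
        process(msg)
        channel.basic_ack(msg.delivery_tag)
    except TemporaryFailure:
        # Hand the message back so another consumer can try it.
        channel.basic_reject(msg.delivery_tag, requeue=True)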
def OverwriteAndClose(self, compressed_data, size): """Directly overwrite the current contents. Replaces the data currently in the stream with compressed_data, and closes the object. Makes it possible to avoid recompressing the data. Args: compressed_data: The data to write, must be zlib compressed. size: The uncompressed size of the data. """ self.Set(self.Schema.CONTENT(compressed_data)) self.Set(self.Schema.SIZE(size)) super(AFF4MemoryStreamBase, self).Close()
Directly overwrite the current contents. Replaces the data currently in the stream with compressed_data, and closes the object. Makes it possible to avoid recompressing the data. Args: compressed_data: The data to write, must be zlib compressed. size: The uncompressed size of the data.
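A usage sketch; `stream` is assumed to be an open instance of the memory-stream class:

import zlib

data = b'raw file contents'
# Pass pre-compressed bytes plus the original size, avoiding recompression.
stream.OverwriteAndClose(zlib.compress(data), len(data))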
def iterative_overlap_assembly(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """
    Assembles longer sequences from reads centered on a variant by merging
    all pairs of overlapping sequences and collapsing shorter sequences
    onto every longer sequence which contains them.

    Returns a list of variant sequences, sorted by decreasing read support.
    """
    if len(variant_sequences) <= 1:
        # if we don't have at least two sequences to start with then
        # skip the whole mess below
        return variant_sequences

    # reduce the number of inputs to the merge algorithm by first collapsing
    # shorter sequences onto the longer sequences which contain them
    n_before_collapse = len(variant_sequences)
    variant_sequences = collapse_substrings(variant_sequences)
    n_after_collapse = len(variant_sequences)
    logger.info(
        "Collapsed %d -> %d sequences",
        n_before_collapse,
        n_after_collapse)
    merged_variant_sequences = greedy_merge(variant_sequences, min_overlap_size)

    return list(sorted(
        merged_variant_sequences,
        key=lambda seq: -len(seq.reads)))
Assembles longer sequences from reads centered on a variant by merging all pairs of overlapping sequences and collapsing shorter sequences onto every longer sequence which contains them. Returns a list of variant sequences, sorted by decreasing read support.
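A conceptual walk-through of the two phases with plain strings in place of VariantSequence objects; the sequences are invented for illustration:

seqs = ['ACGTAC', 'GTAC', 'TACGG']
# 1) collapse_substrings: 'GTAC' is contained in 'ACGTAC', so its read
#    support folds into the longer sequence -> ['ACGTAC', 'TACGG']
# 2) greedy_merge: 'ACGTAC' and 'TACGG' overlap on 'TAC' (assuming this
#    meets min_overlap_size), so they merge into 'ACGTACGG'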