code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def get_results(cmd):
    """Run fping and return its raw output.

    :param cmd: List - the fping command and its options
    :return: bytes - raw output containing csv fping results, including
        the newline characters
    """
    try:
        return subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # fping exits non-zero when any target is unreachable, but its
        # output is still valid csv — return it instead of raising.
        return e.output
Get the ping results using fping. :param cmd: List - the fping command and its options :return: String - raw string output containing csv fping results including the newline characters
def download(self, filename=None):
    """Download an attachment.

    The files are currently not cached since they can be overwritten
    on the server.

    Parameters
    ----------
    filename : string, optional
        Optional name for the file on local disk.

    Returns
    -------
    string
        Path to downloaded temporary file on disk
    """
    tmp_file, f_suffix = download_file(self.url)
    # `filename is not None` replaces the non-idiomatic `not filename is None`.
    if filename is not None:
        # Move the temporary download to the caller-requested location.
        shutil.move(tmp_file, filename)
        return filename
    return tmp_file
Download an attachment. The files are currently not cached since they can be overwritten on the server. Parameters ---------- filename : string, optional Optional name for the file on local disk. Returns ------- string Path to downloaded temporary file on disk
def render_revalidation_failure(self, failed_step, form, **kwargs):
    """Redirect the user back to the first step that failed revalidation."""
    # Rewind the wizard storage to the failing step first, so the wizard
    # resumes there when the redirected request arrives.
    self.storage.current_step = failed_step
    return redirect(self.url_name, step=failed_step)
When a step fails, we have to redirect the user to the first failing step.
def __start_commoncrawl_extractor(warc_download_url, callback_on_article_extracted=None, valid_hosts=None,
                                  start_date=None, end_date=None, strict_date=True,
                                  reuse_previously_downloaded_files=True, local_download_dir_warc=None,
                                  continue_after_error=True, show_download_progress=False,
                                  log_level=logging.ERROR, delete_warc_after_extraction=True,
                                  continue_process=True, log_pathname_fully_extracted_warcs=None):
    """
    Starts a single CommonCrawlExtractor.

    :param warc_download_url: URL of the WARC file to process
    :param callback_on_article_extracted: callable invoked per extracted article
    :param valid_hosts:
    :param start_date:
    :param end_date:
    :param strict_date:
    :param reuse_previously_downloaded_files:
    :param local_download_dir_warc:
    :param continue_after_error:
    :param show_download_progress:
    :param log_level:
    :param delete_warc_after_extraction:
    :param log_pathname_fully_extracted_warcs: path of the log file of fully
        extracted WARCs; falls back to the module-level setting when None
    :return:
    """
    # Bug fix: the parameter was previously ignored — the call always passed
    # the module-level __log_pathname_fully_extracted_warcs. Keep the global
    # as the fallback so existing callers that rely on it are unaffected.
    if log_pathname_fully_extracted_warcs is None:
        log_pathname_fully_extracted_warcs = __log_pathname_fully_extracted_warcs
    commoncrawl_extractor = CommonCrawlExtractor()
    commoncrawl_extractor.extract_from_commoncrawl(
        warc_download_url, callback_on_article_extracted,
        valid_hosts=valid_hosts,
        start_date=start_date, end_date=end_date,
        strict_date=strict_date,
        reuse_previously_downloaded_files=reuse_previously_downloaded_files,
        local_download_dir_warc=local_download_dir_warc,
        continue_after_error=continue_after_error,
        show_download_progress=show_download_progress,
        log_level=log_level,
        delete_warc_after_extraction=delete_warc_after_extraction,
        log_pathname_fully_extracted_warcs=log_pathname_fully_extracted_warcs)
Starts a single CommonCrawlExtractor :param warc_download_url: :param callback_on_article_extracted: :param valid_hosts: :param start_date: :param end_date: :param strict_date: :param reuse_previously_downloaded_files: :param local_download_dir_warc: :param continue_after_error: :param show_download_progress: :param log_level: :return:
def enable_disable(self):
    """Toggle this endpoint: enabled becomes disabled and vice versa.

    :return: None
    """
    # Invert directly instead of branching on the current value.
    self.data['enabled'] = not self.enabled
    self.update()
Enable or disable this endpoint. If enabled, it will be disabled and vice versa. :return: None
def loadSignal(self, name, start=None, end=None):
    """
    Loads the named entry from the upload cache as a signal.
    :param name: the name.
    :param start: the time to start from in HH:mm:ss.SSS format
    :param end: the time to end at in HH:mm:ss.SSS format.
    :return: the signal if the named upload exists, otherwise None.
    """
    entry = self._getCacheEntry(name)
    if entry is None:
        return None
    # Imported lazily so the module loads even without the wav backend.
    from analyser.common.signal import loadSignalFromWav
    return loadSignalFromWav(entry['path'], start=start, end=end)
Loads the named entry from the upload cache as a signal. :param name: the name. :param start: the time to start from in HH:mm:ss.SSS format :param end: the time to end at in HH:mm:ss.SSS format. :return: the signal if the named upload exists.
def generate_key_pair(size=2048, public_exponent=65537, as_string=True):
    """
    Generate an RSA public/private key pair.

    :param size: Optional. Key length in bits; larger keys provide more
        security. 1024 and below are considered breakable; 2048 or 4096 are
        reasonable defaults for new keys. Defaults to 2048.
    :param public_exponent: Optional. Mathematical property of the key
        generation. 65537 is the default and should almost always be used.
    :param as_string: Optional. If True, return a tuple of PEM strings.
        If False, return a tuple of RSA key objects. Defaults to True.
    :return: (PrivateKey<string>, PublicKey<string>) when ``as_string`` is
        True, otherwise (``RSAPrivateKey``, ``RSAPublicKey``) objects from
        ``cryptography.hazmat.primitives.asymmetric.rsa``.
    """
    key = rsa.generate_private_key(
        public_exponent=public_exponent,
        key_size=size,
        backend=default_backend()
    )
    pub = key.public_key()
    if as_string:
        priv_pem = key.private_bytes(
            Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()).decode(ENCODING)
        pub_pem = pub.public_bytes(
            Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(ENCODING)
        return priv_pem, pub_pem
    return key, pub
Generate a public/private key pair. :param size: Optional. Describes how many bits long the key should be, larger keys provide more security, currently 1024 and below are considered breakable, and 2048 or 4096 are reasonable default key sizes for new keys. Defaults to 2048. :param public_exponent: Optional. Indicates what one mathematical property of the key generation will be. 65537 is the default and should almost always be used. :param as_string: Optional. If True, return tuple of strings. If false, return tuple of RSA key objects. Defaults to True. :return: (PrivateKey<string>, PublicKey<string>) :return: ( `RSAPrivateKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey>`_, `RSAPublicKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey>`_)
def compute_curl(self, vector_field):
    """Computes the curl of a vector field over the mesh.

    While the vector field is point-based, the curl will be cell-based.
    The approximation is based on

    .. math::
        n\\cdot curl(F) = \\lim_{A\\to 0} |A|^{-1} <\\int_{dGamma}, F> dr;

    see <https://en.wikipedia.org/wiki/Curl_(mathematics)>. To approximate
    the integral, only the projection of the vector field onto each edge at
    the edge midpoint is needed.
    """
    # Approximate the field at each edge midpoint by averaging its values
    # at the edge endpoints.
    midpoint_field = 0.5 * numpy.sum(vector_field[self.idx_hierarchy], axis=0)
    # Circulation: sum of <edge, F> over the three edges of each cell.
    circulation = numpy.einsum("ijk, ijk->j", self.half_edge_coords, midpoint_field)
    # Vector orthogonal to the triangle (not yet normalized).
    normal = numpy.cross(self.half_edge_coords[0], self.half_edge_coords[1])
    # curl = normal / ||normal|| * circulation / |A|.  Since
    # ||normal|| = 2*|A|, a sqrt is saved by computing
    # curl = normal * circulation * 0.5 / |A|^2 instead.
    return normal * (0.5 * circulation / self.cell_volumes ** 2)[..., None]
Computes the curl of a vector field over the mesh. While the vector field is point-based, the curl will be cell-based. The approximation is based on .. math:: n\\cdot curl(F) = \\lim_{A\\to 0} |A|^{-1} <\\int_{dGamma}, F> dr; see <https://en.wikipedia.org/wiki/Curl_(mathematics)>. Actually, to approximate the integral, one would only need the projection of the vector field onto the edges at the midpoint of the edges.
def __add_annotation_tier(self, docgraph, body, annotation_layer):
    """
    adds a span-based annotation layer as a <tier> to the Exmaralda <body>.

    Parameters
    ----------
    docgraph : DiscourseDocumentGraph
        the document graph from which the annotation spans will be extracted
    body : etree._Element
        an etree representation of the <basic_body> element (and all its
        descendants) of the Exmaralda file
    annotation_layer : str
        the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
    """
    category = annotation_layer.split(':')[-1]
    tier = self.E('tier',
                  {'id': "TIE{}".format(self.tier_count),
                   'category': category, 'type': "t",
                   'display-name': "[{}]".format(annotation_layer)})
    # Consume the current tier id so the next tier gets a fresh one.
    self.tier_count += 1
    for node_id in select_nodes_by_layer(docgraph, annotation_layer):
        span_node_ids = get_span(docgraph, node_id)
        if not span_node_ids:
            continue  # nodes without a span produce no event
        start_id, end_id = self.__span2event(span_node_ids)
        label = docgraph.node[node_id].get('label', '')
        tier.append(self.E('event',
                           {'start': "T{}".format(start_id),
                            'end': "T{}".format(end_id)},
                           label))
    body.append(tier)
adds a span-based annotation layer as a <tier> to the Exmaralda <body>. Parameters ---------- docgraph : DiscourseDocumentGraph the document graph from which the chains will be extracted body : etree._Element an etree representation of the <basic_body> element (and all its descendants) of the Exmaralda file annotation_layer : str the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
def pulse_magnitude(time, magnitude, start, repeat_time=0):
    """
    Implements xmile's PULSE function.

    PULSE: Generate a one-DT wide pulse at the given time.

    Parameters: 2 or 3: (magnitude, first time[, interval])
    Without interval or when interval = 0, the PULSE is generated only once.

    Example: PULSE(20, 12, 5) generates a pulse value of 20/DT at
    time 12, 17, 22, etc.

    In range [-inf, start) returns 0
    In range [start + n * repeat_time, start + n * repeat_time + dt)
    returns the pulse value
    In range [start + n * repeat_time + dt, start + (n + 1) * repeat_time)
    returns 0

    NOTE(review): the docstring example says 20/DT but the implementation
    returns ``magnitude * time.step()``; preserved as-is — confirm upstream.
    """
    t = time()
    small = 1e-6  # What is considered zero according to Vensim Help
    # Bug fix: per the contract above, no pulse is ever emitted before
    # `start`.  Previously the one-shot branch fired in (start-dt, start)
    # and the repeating branch fired at start - n*repeat_time.
    if t < start - small:
        return 0
    if repeat_time <= small:
        # Single pulse at `start` only.
        return magnitude * time.step() if abs(t - start) < time.step() else 0
    return magnitude * time.step() if abs((t - start) % repeat_time) < time.step() else 0
Implements xmile's PULSE function PULSE: Generate a one-DT wide pulse at the given time Parameters: 2 or 3: (magnitude, first time[, interval]) Without interval or when interval = 0, the PULSE is generated only once Example: PULSE(20, 12, 5) generates a pulse value of 20/DT at time 12, 17, 22, etc. In range [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + dt) return magnitude/dt In range [start + n * repeat_time + dt, start + (n + 1) * repeat_time) return 0
def interactive(proto_dataset_uri):
    """Interactive prompting to populate the readme."""
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        uri=proto_dataset_uri,
        config_path=CONFIG_PATH)

    # Configure the YAML round-tripper so comments and ordering survive.
    yaml = YAML()
    yaml.explicit_start = True
    yaml.indent(mapping=2, sequence=4, offset=2)

    # Load the readme template as a CommentedMap and fill it in interactively.
    descriptive_metadata = yaml.load(_get_readme_template())
    descriptive_metadata = _prompt_for_values(descriptive_metadata)

    # Serialize the answers and store them as the dataset readme.
    stream = StringIO()
    yaml.dump(descriptive_metadata, stream)
    proto_dataset.put_readme(stream.getvalue())

    click.secho("Updated readme ", fg="green")
    click.secho("To edit the readme using your default editor:")
    click.secho(
        "dtool readme edit {}".format(proto_dataset_uri),
        fg="cyan")
Interactive prompting to populate the readme.
def _process_infohash_list(infohash_list): """ Method to convert the infohash_list to qBittorrent API friendly values. :param infohash_list: List of infohash. """ if isinstance(infohash_list, list): data = {'hashes': '|'.join([h.lower() for h in infohash_list])} else: data = {'hashes': infohash_list.lower()} return data
Method to convert the infohash_list to qBittorrent API friendly values. :param infohash_list: List of infohash.
def status(self):
    """
    Returns modified, added, removed, deleted files for current changeset
    """
    # Compare the first parent against this changeset's own node.
    parent_node = self._ctx.p1().node()
    current_node = self._ctx.node()
    return self.repository._repo.status(parent_node, current_node)
Returns modified, added, removed, deleted files for current changeset
def get_longest_target_alignment_coords_by_name(self, name):
    """For a name get the best alignment.

    :param name: query name to look up in the index
    :return: [filebyte, innerbyte] describing the distance to the start of
        the zipped block and the distance within the unzipped block, or
        None if no primary alignment is indexed for the name
    :rtype: list or None
    """
    # Flag mask 2304 = 0x100 (secondary) | 0x800 (supplementary);
    # a primary alignment has neither bit set.
    for line in (self._lines[i] for i in self._name_to_num[name]):
        if line['flag'] & 2304 == 0:
            return [line['filestart'], line['innerstart']]
    # Removed unreachable sys.stderr.write/sys.exit that followed the
    # return, along with the unused `longest`/`coord` locals.
    return None
For a name get the best alignment :return: [filebyte,innerbyte] describing the distance to the start of the zipped block, and the distance within the unzipped block :rtype: list
def resources(self):
    ''' get total resources and available ones

    :return: defaultdict mapping each resource name to a dict with
        'total' and 'used' counts
    '''
    used_resources = self._used_resources()
    ret = collections.defaultdict(dict)
    for resource, total in self._resources.items():
        ret[resource]['total'] = total
        # Resources with no recorded usage count as 0 used.
        ret[resource]['used'] = used_resources.get(resource, 0)
    return ret
get total resources and available ones
def angular_distance(first, second, bidirectional=True):
    """
    Calculate the angular distance between two linear features or
    elementwise angular distance between two sets of linear features.
    (Note: a linear feature in this context is a point on a stereonet
    represented by a single latitude and longitude.)

    Parameters
    ----------
    first : (lon, lat) 2xN array-like or sequence of two numbers
        The longitudes and latitudes of the first measurements in radians.
    second : (lon, lat) 2xN array-like or sequence of two numbers
        The longitudes and latitudes of the second measurements in radians.
    bidirectional : boolean
        If True, only "inner" angles will be returned. In other words, all
        angles returned by this function will be in the range [0, pi/2]
        (0 to 90 in degrees). Otherwise, ``first`` and ``second`` will be
        treated as vectors going from the origin outwards instead of
        bidirectional infinite lines. Therefore, with
        ``bidirectional=False``, angles returned by this function will be
        in the range [0, pi] (zero to 180 degrees).

    Returns
    -------
    dist : array
        The elementwise angular distance between each pair of measurements
        in (lon1, lat1) and (lon2, lat2).

    Examples
    --------
    Calculate the angle between two lines specified as a plunge/bearing

        >>> angle = angular_distance(line(30, 270), line(40, 90))
        >>> np.degrees(angle)
        array([ 70.])

    Let's do the same, but change the "bidirectional" argument:

        >>> first, second = line(30, 270), line(40, 90)
        >>> angle = angular_distance(first, second, bidirectional=False)
        >>> np.degrees(angle)
        array([ 110.])

    Calculate the angle between two planes.

        >>> angle = angular_distance(pole(0, 10), pole(180, 10))
        >>> np.degrees(angle)
        array([ 20.])
    """
    lon1, lat1 = first
    lon2, lat2 = second
    lon1, lat1, lon2, lat2 = np.atleast_1d(lon1, lat1, lon2, lat2)
    xyz1 = sph2cart(lon1, lat1)
    xyz2 = sph2cart(lon2, lat2)
    # Dot product of each pair of unit vectors. einsum handles multiple
    # measurements at once and is quicker than apply_along_axis.
    dot = np.einsum('ij,ij->j', xyz1, xyz2)
    angle = np.arccos(dot)
    # Numerical noise can push |dot| slightly past 1, which makes arccos
    # produce NaN near 0 and 180 degrees; clamp those cases explicitly.
    if np.any(np.isnan(angle)):
        rtol = 1e-4
        angle[np.isclose(dot, -1, rtol)] = np.pi
        angle[np.isclose(dot, 1, rtol)] = 0
    if bidirectional:
        over = angle > np.pi / 2
        angle[over] = np.pi - angle[over]
    return angle
Calculate the angular distance between two linear features or elementwise angular distance between two sets of linear features. (Note: a linear feature in this context is a point on a stereonet represented by a single latitude and longitude.) Parameters ---------- first : (lon, lat) 2xN array-like or sequence of two numbers The longitudes and latitudes of the first measurements in radians. second : (lon, lat) 2xN array-like or sequence of two numbers The longitudes and latitudes of the second measurements in radians. bidirectional : boolean If True, only "inner" angles will be returned. In other words, all angles returned by this function will be in the range [0, pi/2] (0 to 90 in degrees). Otherwise, ``first`` and ``second`` will be treated as vectors going from the origin outwards instead of bidirectional infinite lines. Therefore, with ``bidirectional=False``, angles returned by this function will be in the range [0, pi] (zero to 180 degrees). Returns ------- dist : array The elementwise angular distance between each pair of measurements in (lon1, lat1) and (lon2, lat2). Examples -------- Calculate the angle between two lines specified as a plunge/bearing >>> angle = angular_distance(line(30, 270), line(40, 90)) >>> np.degrees(angle) array([ 70.]) Let's do the same, but change the "bidirectional" argument: >>> first, second = line(30, 270), line(40, 90) >>> angle = angular_distance(first, second, bidirectional=False) >>> np.degrees(angle) array([ 110.]) Calculate the angle between two planes. >>> angle = angular_distance(pole(0, 10), pole(180, 10)) >>> np.degrees(angle) array([ 20.])
def save(self, path_or_file, strict=True, fmt='auto'):
    """Serialize annotation as a JSON formatted stream to file.

    Parameters
    ----------
    path_or_file : str or file-like
        Path to save the JAMS object on disk OR an open file descriptor
        to write into.
    strict : bool
        Force strict schema validation.
    fmt : str ['auto', 'jams', 'jamz']
        The output encoding format. If `auto`, it is inferred from the
        file name. If the input is an open file handle, `jams` encoding
        is used.

    Raises
    ------
    SchemaError
        If `strict == True` and the JAMS object fails schema or
        namespace validation.

    See also
    --------
    validate
    """
    # Validate first so a schema failure never leaves a partially
    # written file behind.
    self.validate(strict=strict)
    with _open(path_or_file, mode='w', fmt=fmt) as fp:
        json.dump(self.__json__, fp, indent=2)
Serialize annotation as a JSON formatted stream to file. Parameters ---------- path_or_file : str or file-like Path to save the JAMS object on disk OR An open file descriptor to write into strict : bool Force strict schema validation fmt : str ['auto', 'jams', 'jamz'] The output encoding format. If `auto`, it is inferred from the file name. If the input is an open file handle, `jams` encoding is used. Raises ------ SchemaError If `strict == True` and the JAMS object fails schema or namespace validation. See also -------- validate
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
         allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
         allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
         mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
         multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None,
         dropFieldIfAllNull=None, encoding=None, locale=None):
    """
    Loads JSON files and returns the results as a :class:`DataFrame`.

    `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
    For JSON (one record per file), set the ``multiLine`` parameter to ``true``.

    If the ``schema`` parameter is not specified, this function goes
    through the input once to determine the input schema.

    :param path: string represents path to the JSON dataset, or a list of paths,
        or RDD of Strings storing JSON objects.
    :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or
        a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
    :param primitivesAsString: infers all primitive values as a string type. If None is set,
        it uses the default value, ``false``.
    :param prefersDecimal: infers all floating-point values as a decimal type. If the values
        do not fit in decimal, then it infers them as doubles. If None is
        set, it uses the default value, ``false``.
    :param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
        it uses the default value, ``false``.
    :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
        it uses the default value, ``false``.
    :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
        set, it uses the default value, ``true``.
    :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
        set, it uses the default value, ``false``.
    :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
        using backslash quoting mechanism. If None is
        set, it uses the default value, ``false``.
    :param mode: allows a mode for dealing with corrupt records during parsing. If None is
        set, it uses the default value, ``PERMISSIVE``.

        * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
          into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \
          fields to ``null``. To keep corrupt records, an user can set a string type \
          field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \
          schema does not have the field, it drops corrupt records during parsing. \
          When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \
          field in an output schema.
        * ``DROPMALFORMED`` : ignores the whole corrupted records.
        * ``FAILFAST`` : throws an exception when it meets corrupted records.

    :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
        created by ``PERMISSIVE`` mode. This overrides
        ``spark.sql.columnNameOfCorruptRecord``. If None is set,
        it uses the value specified in
        ``spark.sql.columnNameOfCorruptRecord``.
    :param dateFormat: sets the string that indicates a date format. Custom date formats
        follow the formats at ``java.time.format.DateTimeFormatter``. This
        applies to date type. If None is set, it uses the
        default value, ``yyyy-MM-dd``.
    :param timestampFormat: sets the string that indicates a timestamp format.
        Custom date formats follow the formats at
        ``java.time.format.DateTimeFormatter``.
        This applies to timestamp type. If None is set, it uses the
        default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
    :param multiLine: parse one record, which may span multiple lines, per file. If None is
        set, it uses the default value, ``false``.
    :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
        characters (ASCII characters with value less than 32,
        including tab and line feed characters) or not.
    :param encoding: allows to forcibly set one of standard basic or extended encoding for
        the JSON files. For example UTF-16BE, UTF-32LE. If None is set,
        the encoding of input JSON will be detected automatically
        when the multiLine option is set to ``true``.
    :param lineSep: defines the line separator that should be used for parsing. If None is
        set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
    :param samplingRatio: defines fraction of input JSON objects used for schema inferring.
        If None is set, it uses the default value, ``1.0``.
    :param dropFieldIfAllNull: whether to ignore column of all null values or empty
        array/struct during schema inference. If None is set, it
        uses the default value, ``false``.
    :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set,
        it uses the default value, ``en-US``. For instance, ``locale`` is used while
        parsing dates and timestamps.

    >>> df1 = spark.read.json('python/test_support/sql/people.json')
    >>> df1.dtypes
    [('age', 'bigint'), ('name', 'string')]
    >>> rdd = sc.textFile('python/test_support/sql/people.json')
    >>> df2 = spark.read.json(rdd)
    >>> df2.dtypes
    [('age', 'bigint'), ('name', 'string')]
    """
    # Push every reader option down to the underlying JVM reader.
    self._set_opts(
        schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
        allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
        allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
        allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
        mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
        timestampFormat=timestampFormat, multiLine=multiLine,
        allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep,
        samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding,
        locale=locale)
    # A single path is normalized to a one-element list (py2-style
    # `basestring`/`unicode` names are used throughout this module).
    if isinstance(path, basestring):
        path = [path]
    if type(path) == list:
        # List of paths: hand them to the JVM reader as a Scala Seq.
        return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
    elif isinstance(path, RDD):
        def func(iterator):
            # Coerce every element to utf-8 encoded bytes before the
            # records cross the Py4J boundary.
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = path.mapPartitions(func)
        keyed._bypass_serializer = True
        jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
        return self._df(self._jreader.json(jrdd))
    else:
        raise TypeError("path can be only string, list or RDD")
Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. 
To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. 
:param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')]
def to_dict(self):
    """
    Build a dictionary representation of the Identity object, suitable
    for serializing to JSON.

    :return: dict representing the object
    """
    # Map each snake_case attribute onto its camelCase JSON key.
    return {
        "apiKey": self.api_key,
        "userArn": self.user_arn,
        "cognitoAuthenticationType": self.cognito_authentication_type,
        "caller": self.caller,
        "userAgent": self.user_agent,
        "user": self.user,
        "cognitoIdentityPoolId": self.cognito_identity_pool_id,
        "cognitoAuthenticationProvider": self.cognito_authentication_provider,
        "sourceIp": self.source_ip,
        "accountId": self.account_id,
    }
Constructs an dictionary representation of the Identity Object to be used in serializing to JSON :return: dict representing the object
def button_change_send(self, time_boot_ms, last_change_ms, state, force_mavlink1=False):
    '''
    Report button state change

    time_boot_ms              : Timestamp (milliseconds since system boot) (uint32_t)
    last_change_ms            : Time of last change of button state (uint32_t)
    state                     : Bitmap state of buttons (uint8_t)
    '''
    # Encode first, then hand the message to the transport layer.
    msg = self.button_change_encode(time_boot_ms, last_change_ms, state)
    return self.send(msg, force_mavlink1=force_mavlink1)
Report button state change time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) last_change_ms : Time of last change of button state (uint32_t) state : Bitmap state of buttons (uint8_t)
def isbase(path1, path2):
    # type: (Text, Text) -> bool
    """Check if ``path1`` is a base of ``path2``.

    Arguments:
        path1 (str): A PyFilesytem path.
        path2 (str): A PyFilesytem path.

    Returns:
        bool: `True` if ``path2`` starts with ``path1``

    Example:
        >>> isbase('foo/bar', 'foo/bar/baz/egg.txt')
        True

    """
    # Normalize both to absolute directory form ('/a/b/') so that a
    # prefix match is always on a whole path component.
    base = forcedir(abspath(path1))
    candidate = forcedir(abspath(path2))
    return candidate.startswith(base)
Check if ``path1`` is a base of ``path2``. Arguments: path1 (str): A PyFilesytem path. path2 (str): A PyFilesytem path. Returns: bool: `True` if ``path2`` starts with ``path1`` Example: >>> isbase('foo/bar', 'foo/bar/baz/egg.txt') True
def multicore(function, cores, multiargs, **singleargs):
    """
    wrapper for multicore process execution

    Parameters
    ----------
    function
        individual function to be applied to each process item
    cores: int
        the number of subprocesses started/CPUs used; this value is reduced in
        case the number of subprocesses is smaller
    multiargs: dict
        a dictionary containing sub-function argument names as keys and lists
        of arguments to be distributed among the processes as values
    singleargs
        all remaining arguments which are invariant among the subprocesses

    Returns
    -------
    None or list
        the return of the function for all subprocesses

    Notes
    -----
    - all `multiargs` value lists must be of same length, i.e. all argument
      keys must be explicitly defined for each subprocess
    - all function arguments passed via `singleargs` must be provided with the
      full argument name and its value (i.e. argname=argval); default function
      args are not accepted
    - if the processes return anything else than None, this function will
      return a list of results
    - if all processes return None, this function will be of type void

    Examples
    --------
    >>> def add(x, y, z):
    ...     return x + y + z
    >>> multicore(add, cores=2, multiargs={'x': [1, 2]}, y=5, z=9)
    [15, 16]
    >>> multicore(add, cores=2, multiargs={'x': [1, 2], 'y': [5, 6]}, z=9)
    [15, 17]

    See Also
    --------
    :mod:`pathos.multiprocessing`
    """
    # enable pickling of tracebacks so worker exceptions can be re-raised
    # in the parent process with their original stack
    tblib.pickling_support.install()

    # compare the function arguments with the multi and single arguments and raise errors if mismatches occur
    if sys.version_info >= (3, 0):
        check = inspect.getfullargspec(function)
        varkw = check.varkw
    else:
        # Python 2 fallback: getargspec uses 'keywords' for **kwargs
        check = inspect.getargspec(function)
        varkw = check.keywords
    if not check.varargs and not varkw:
        # validation is only possible when the target function does not
        # accept *args/**kwargs (otherwise any name is acceptable)
        multiargs_check = [x for x in multiargs if x not in check.args]
        singleargs_check = [x for x in singleargs if x not in check.args]
        if len(multiargs_check) > 0:
            raise AttributeError('incompatible multi arguments: {0}'.format(', '.join(multiargs_check)))
        if len(singleargs_check) > 0:
            raise AttributeError('incompatible single arguments: {0}'.format(', '.join(singleargs_check)))

    # compare the list lengths of the multi arguments and raise errors if they are of different length
    arglengths = list(set([len(multiargs[x]) for x in multiargs]))
    if len(arglengths) > 1:
        raise AttributeError('multi argument lists of different length')

    # prevent starting more threads than necessary
    cores = cores if arglengths[0] >= cores else arglengths[0]

    # create a list of dictionaries each containing the arguments for individual
    # function calls to be passed to the multicore processes
    processlist = [dictmerge(dict([(arg, multiargs[arg][i]) for arg in multiargs]), singleargs)
                   for i in range(len(multiargs[list(multiargs.keys())[0]]))]

    if platform.system() == 'Windows':
        # in Windows parallel processing needs to strictly be in a "if __name__ == '__main__':" wrapper
        # it was thus necessary to outsource this to a different script and try to serialize all input for sharing objects
        # https://stackoverflow.com/questions/38236211/why-multiprocessing-process-behave-differently-on-windows-and-linux-for-global-o

        # a helper script to perform the parallel processing
        script = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'multicore_helper.py')

        # a temporary file to write the serialized function variables
        tmpfile = os.path.join(tempfile.gettempdir(), 'spatialist_dump')

        # check if everything can be serialized
        if not dill.pickles([function, cores, processlist]):
            raise RuntimeError('cannot fully serialize function arguments;\n'
                               ' see https://github.com/uqfoundation/dill for supported types')

        # write the serialized variables
        with open(tmpfile, 'wb') as tmp:
            dill.dump([function, cores, processlist], tmp, byref=False)

        # run the helper script
        proc = sp.Popen([sys.executable, script], stdin=sp.PIPE, stderr=sp.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(err.decode())

        # retrieve the serialized output of the processing which was written to the temporary file by the helper script
        with open(tmpfile, 'rb') as tmp:
            result = dill.load(tmp)
        return result
    else:
        results = None

        def wrapper(**kwargs):
            # run the target function and capture any exception so it can be
            # transported back to the parent process instead of being lost
            try:
                return function(**kwargs)
            except Exception as e:
                return ExceptionWrapper(e)

        # block printing of the executed function
        with HiddenPrints():
            # start pool of processes and do the work
            try:
                pool = mp.Pool(processes=cores)
            except NameError:
                raise ImportError("package 'pathos' could not be imported")
            results = pool.imap(lambda x: wrapper(**x), processlist)
            pool.close()
            pool.join()

        i = 0
        out = []
        for item in results:
            if isinstance(item, ExceptionWrapper):
                # annotate the worker exception with the call that produced it,
                # then re-raise it with the original traceback
                item.ee = type(item.ee)(str(item.ee) + "\n(called function '{}' with args {})"
                                        .format(function.__name__, processlist[i]))
                raise (item.re_raise())
            out.append(item)
            i += 1

        # evaluate the return of the processing function;
        # if any value is not None then the whole list of results is returned
        eval = [x for x in out if x is not None]
        if len(eval) == 0:
            return None
        else:
            return out
wrapper for multicore process execution Parameters ---------- function individual function to be applied to each process item cores: int the number of subprocesses started/CPUs used; this value is reduced in case the number of subprocesses is smaller multiargs: dict a dictionary containing sub-function argument names as keys and lists of arguments to be distributed among the processes as values singleargs all remaining arguments which are invariant among the subprocesses Returns ------- None or list the return of the function for all subprocesses Notes ----- - all `multiargs` value lists must be of same length, i.e. all argument keys must be explicitly defined for each subprocess - all function arguments passed via `singleargs` must be provided with the full argument name and its value (i.e. argname=argval); default function args are not accepted - if the processes return anything else than None, this function will return a list of results - if all processes return None, this function will be of type void Examples -------- >>> def add(x, y, z): >>> return x + y + z >>> multicore(add, cores=2, multiargs={'x': [1, 2]}, y=5, z=9) [15, 16] >>> multicore(add, cores=2, multiargs={'x': [1, 2], 'y': [5, 6]}, z=9) [15, 17] See Also -------- :mod:`pathos.multiprocessing`
def __check_label_image(label_image):
    """Check the label image for consistent labelling starting from 1.

    :param label_image: array of integer region labels
    :raises AttributeError: if the image contains no regions or the labels
        are not exactly the consecutive integers ``1..label_image.max()``
    """
    # numpy replaces the former scipy aliases: scipy.unique/scipy.arange were
    # deprecated re-exports of numpy functions and have been removed from
    # modern scipy releases
    import numpy
    encountered_indices = numpy.unique(label_image)
    expected_indices = numpy.arange(1, numpy.max(label_image) + 1)
    if encountered_indices.size != expected_indices.size or \
            not (encountered_indices == expected_indices).all():
        raise AttributeError('The supplied label image does either not contain any regions or they are not labeled consecutively starting from 1.')
Check the label image for consistent labelling starting from 1.
def create_job(self,
               builder_job,  # type: Dict[Text, Any]
               wf_job=None,  # type: Callable[[Dict[Text, Text], Callable[[Any, Any], Any], RuntimeContext], Generator[Any, None, None]]
               is_output=False
               ):  # type: (...) -> Dict
    # TODO customise the file
    """Generate the new job object with RO specific relative paths.

    Serializes a relativised copy of ``builder_job`` into the RO's workflow
    folder (as primary-job.json, or primary-output.json when ``is_output``
    is set) and records the relativised workflow-level inputs.
    """
    # NOTE(review): wf_job is currently unused in this body — confirm whether
    # it is kept for interface compatibility with callers.
    copied = copy.deepcopy(builder_job)
    relativised_input_objecttemp = {}  # type: Dict[Text, Any]
    # rewrite file locations inside the copy to RO-relative paths
    self._relativise_files(copied)

    def jdefault(o):
        # fallback JSON serializer for mapping-like objects
        return dict(o)

    if is_output:
        rel_path = posixpath.join(_posix_path(WORKFLOW), "primary-output.json")
    else:
        rel_path = posixpath.join(_posix_path(WORKFLOW), "primary-job.json")
    j = json_dumps(copied, indent=4, ensure_ascii=False, default=jdefault)
    with self.write_bag_file(rel_path) as file_path:
        file_path.write(j + u"\n")
    _logger.debug(u"[provenance] Generated customised job file: %s", rel_path)

    # Generate dictionary with keys as workflow level input IDs and values
    # as
    # 1) for files the relativised location containing hash
    # 2) for other attributes, the actual value.
    relativised_input_objecttemp = {}
    for key, value in copied.items():
        if isinstance(value, MutableMapping):
            # only File/Directory mappings carry relativised locations
            if value.get("class") in ("File", "Directory"):
                relativised_input_objecttemp[key] = value
        else:
            relativised_input_objecttemp[key] = value
    # drop falsy entries before merging into the accumulated input object
    self.relativised_input_object.update(
        {k: v for k, v in relativised_input_objecttemp.items() if v})
    return self.relativised_input_object
Generate the new job object with RO specific relative paths.
def parse_url_or_log(url, encoding='utf-8'):
    '''Parse ``url`` and return a URLInfo.

    When the URL cannot be parsed, a warning is logged and ``None`` is
    returned instead of raising.
    '''
    try:
        url_info = URLInfo.parse(url, encoding=encoding)
    except ValueError as error:
        _logger.warning(__(
            _('Unable to parse URL ‘{url}’: {error}.'),
            url=wpull.string.printable_str(url), error=error))
        return None
    return url_info
Parse and return a URLInfo. This function logs a warning if the URL cannot be parsed and returns None.
def queryset(self, request, queryset):
    """
    Filter ``queryset`` down to children of the selected section.

    Returns ``None`` (meaning "apply no filtering") when no section is
    selected or the section cannot be uniquely resolved.
    """
    section_id = self.value()
    if not section_id:
        return None
    try:
        section = SectionPage.objects.get(id=section_id)
        return queryset.child_of(section).all()
    except (ObjectDoesNotExist, MultipleObjectsReturned):
        return None
Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.
def __make_request(self, url, method, data, auth, cookies, headers, proxies, timeout, verify):
    """Perform an HTTP request via the ``requests`` library.

    Args:
        url (str): The URL to call.
        method (str): The HTTP method name (e.g. `get` or `post`).
        data (str): The request payload.
        auth (obj): The authentication handler.
        cookies (obj): The cookie dict.
        headers (obj): The header dict.
        proxies (obj): The proxies dict.
        timeout (int): The request timeout in seconds.
        verify (mixed): SSL verification setting.

    Returns:
        obj: The response object.
    """
    # resolve the method name to the matching requests.<method> callable
    requester = getattr(requests, method)
    kwargs = dict(
        url=url,
        data=data,
        auth=auth,
        cookies=cookies,
        headers=headers,
        proxies=proxies,
        timeout=timeout,
        verify=verify,
        allow_redirects=True,
        stream=False,
    )
    return requester(**kwargs)
Execute a request with the given data. Args: url (str): The URL to call. method (str): The method (e.g. `get` or `post`). data (str): The data to call the URL with. auth (obj): The authentication class. cookies (obj): The cookie dict. headers (obj): The header dict. proxies (obj): The proxies dict. timeout (int): The request timeout in seconds. verify (mixed): SSL verification. Returns: obj: The response object.
def solar_irradiation(latitude, longitude, Z, moment, surface_tilt,
                      surface_azimuth, T=None, P=None, solar_constant=1366.1,
                      atmos_refract=0.5667, albedo=0.25, linke_turbidity=None,
                      extraradiation_method='spencer',
                      airmass_model='kastenyoung1989',
                      cache=None):
    r'''Calculates the amount of solar radiation and radiation reflected back
    the atmosphere which hits a surface at a specified tilt, and facing a
    specified azimuth.

    This functions is a wrapper for the incredibly comprehensive
    `pvlib library <https://github.com/pvlib/pvlib-python>`_, and requires it
    to be installed.

    Parameters
    ----------
    latitude : float
        Latitude, between -90 and 90 [degrees]
    longitude : float
        Longitude, between -180 and 180, [degrees]
    Z : float, optional
        Elevation above sea level for the position, [m]
    moment : datetime
        Time and date for the calculation, in local UTC time (not daylight
        savings time), [-]
    surface_tilt : float
        The angle above the horizontal of the object being hit by radiation,
        [degrees]
    surface_azimuth : float
        The angle the object is facing (positive North eastwards 0° to 360°),
        [degrees]
    T : float, optional
        Temperature of atmosphere at ground level, [K]
    P : float, optional
        Pressure of atmosphere at ground level, [Pa]
    solar_constant : float, optional
        The amount of solar radiation which reaches earth's disk (at a
        standardized distance of 1 AU); this constant is independent of
        activity or conditions on earth, but will vary throughout the sun's
        lifetime and may increase or decrease slightly due to solar activity,
        [W/m^2]
    atmos_refract : float, optional
        Atmospheric refractivity at sunrise/sunset (0.5667 deg is an often
        used value; this varies substantially and has an impact of a few
        minutes on when sunrise and sunset is), [degrees]
    albedo : float, optional
        The average amount of reflection of the terrain surrounding the
        object at quite a distance; this impacts how much sunlight reflected
        off the ground, gets reflected back off clouds, [-]
    linke_turbidity : float, optional
        The amount of pollution/water in the sky versus a perfect clear sky;
        If not specified, this will be retrieved from a historical grid;
        typical values are 3 for cloudy, and 7 for severe pollution around a
        city, [-]
    extraradiation_method : str, optional
        The specified method to calculate the effect of earth's position on
        the amount of radiation which reaches earth according to the methods
        available in the `pvlib` library, [-]
    airmass_model : str, optional
        The specified method to calculate the amount of air the sunlight
        needs to travel through to reach the earth according to the methods
        available in the `pvlib` library, [-]
    cache : dict, optional
        Dictionary to check for values to use to skip some calculations;
        `apparent_zenith`, `zenith`, `azimuth` supported, [-]

    Returns
    -------
    poa_global : float
        The total irradiance in the plane of the surface, [W/m^2]
    poa_direct : float
        The total beam irradiance in the plane of the surface, [W/m^2]
    poa_diffuse : float
        The total diffuse irradiance in the plane of the surface, [W/m^2]
    poa_sky_diffuse : float
        The sky component of the diffuse irradiance, excluding the impact
        from the ground, [W/m^2]
    poa_ground_diffuse : float
        The ground-sky diffuse irradiance component, [W/m^2]

    Examples
    --------
    >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07,
    ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0,
    ... surface_azimuth=180.0)
    (1065.7621896280812, 945.2656564506323, 120.49653317744884, 95.31535344213178, 25.181179735317063)

    >>> cache = {'apparent_zenith': 41.099082295767545, 'zenith': 41.11285376417578, 'azimuth': 182.5631874250523}
    >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07,
    ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0,
    ... linke_turbidity=3, T=300, P=1E5,
    ... surface_azimuth=180.0, cache=cache)
    (1042.5677703677097, 918.2377548545295, 124.33001551318027, 99.6228657378363, 24.70714977534396)

    At night, there is no solar radiation and this function returns zeros:

    >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07,
    ... moment=datetime(2018, 4, 15, 2, 43, 5), surface_tilt=41.0,
    ... surface_azimuth=180.0)
    (0.0, -0.0, 0.0, 0.0, 0.0)

    Notes
    -----
    The retrieval of `linke_turbidity` requires the pytables library (and
    Pandas); if it is not installed, specify a value of `linke_turbidity` to
    avoid the dependency.

    There is some redundancy of the calculated results, according to the
    following relations. The total irradiance is normally that desired for
    engineering calculations.

    poa_diffuse = poa_ground_diffuse + poa_sky_diffuse

    poa_global = poa_direct + poa_diffuse

    For a surface such as a pipe or vessel, an approach would be to split it
    into a number of rectangles and sum up the radiation absorbed by each.

    This calculation is fairly slow.

    References
    ----------
    .. [1] Will Holmgren, Calama-Consulting, Tony Lorenzo, Uwe Krien, bmu,
       DaCoEx, mayudong, et al. Pvlib/Pvlib-Python: 0.5.1. Zenodo, 2017.
       https://doi.org/10.5281/zenodo.1016425.
    '''
    # Atmospheric refraction at sunrise/sunset (0.5667 deg is an often used value)
    from fluids.optional import spa
    # fix: the previous import list contained get_relative_airmass and
    # get_absolute_airmass twice each; the duplicates are removed here
    from fluids.optional.irradiance import (get_relative_airmass,
                                            get_absolute_airmass, ineichen,
                                            get_total_irradiance)
    # NOTE(review): `spa` is not referenced below — retained in case the
    # import has required side effects; confirm before removal.

    moment_timetuple = moment.timetuple()
    # 'spencer' works from day-of-year; the other methods take the datetime
    moment_arg_dni = (moment_timetuple.tm_yday
                      if extraradiation_method == 'spencer' else moment)
    dni_extra = _get_extra_radiation_shim(moment_arg_dni,
                                          solar_constant=solar_constant,
                                          method=extraradiation_method,
                                          epoch_year=moment.year)
    if T is None or P is None:
        # fill missing ambient conditions from the NRLMSISE00 atmosphere model
        atmosphere = ATMOSPHERE_NRLMSISE00(Z=Z, latitude=latitude,
                                           longitude=longitude,
                                           day=moment_timetuple.tm_yday)
        if T is None:
            T = atmosphere.T
        if P is None:
            P = atmosphere.P
    if cache is not None and 'zenith' in cache:
        # reuse previously computed solar-position angles
        zenith = cache['zenith']
        apparent_zenith = cache['apparent_zenith']
        azimuth = cache['azimuth']
    else:
        apparent_zenith, zenith, _, _, azimuth, _ = solar_position(
            moment=moment, latitude=latitude, longitude=longitude,
            Z=Z, T=T, P=P, atmos_refract=atmos_refract)
    if linke_turbidity is None:
        # historical-grid lookup; requires pvlib, pandas and pytables
        from pvlib.clearsky import lookup_linke_turbidity
        import pandas as pd
        linke_turbidity = float(lookup_linke_turbidity(
            pd.DatetimeIndex([moment]), latitude, longitude).values)

    # some airmass models expect the refraction-corrected (apparent) zenith,
    # others the true zenith
    if airmass_model in apparent_zenith_airmass_models:
        used_zenith = apparent_zenith
    elif airmass_model in true_zenith_airmass_models:
        used_zenith = zenith
    else:
        raise Exception('Unrecognized airmass model')
    relative_airmass = get_relative_airmass(used_zenith, model=airmass_model)
    airmass_absolute = get_absolute_airmass(relative_airmass, pressure=P)

    # clear-sky irradiance components at ground level
    ans = ineichen(apparent_zenith=apparent_zenith,
                   airmass_absolute=airmass_absolute,
                   linke_turbidity=linke_turbidity, altitude=Z,
                   dni_extra=solar_constant, perez_enhancement=True)
    ghi = ans['ghi']
    dni = ans['dni']
    dhi = ans['dhi']

    # project the components onto the tilted, azimuth-oriented surface
    ans = get_total_irradiance(surface_tilt=surface_tilt,
                               surface_azimuth=surface_azimuth,
                               solar_zenith=apparent_zenith,
                               solar_azimuth=azimuth,
                               dni=dni, ghi=ghi, dhi=dhi,
                               dni_extra=dni_extra,
                               airmass=airmass_absolute,
                               albedo=albedo)
    poa_global = float(ans['poa_global'])
    poa_direct = float(ans['poa_direct'])
    poa_diffuse = float(ans['poa_diffuse'])
    poa_sky_diffuse = float(ans['poa_sky_diffuse'])
    poa_ground_diffuse = float(ans['poa_ground_diffuse'])
    return (poa_global, poa_direct, poa_diffuse, poa_sky_diffuse,
            poa_ground_diffuse)
r'''Calculates the amount of solar radiation and radiation reflected back the atmosphere which hits a surface at a specified tilt, and facing a specified azimuth. This functions is a wrapper for the incredibly comprehensive `pvlib library <https://github.com/pvlib/pvlib-python>`_, and requires it to be installed. Parameters ---------- latitude : float Latitude, between -90 and 90 [degrees] longitude : float Longitude, between -180 and 180, [degrees] Z : float, optional Elevation above sea level for the position, [m] moment : datetime Time and date for the calculation, in local UTC time (not daylight savings time), [-] surface_tilt : float The angle above the horizontal of the object being hit by radiation, [degrees] surface_azimuth : float The angle the object is facing (positive North eastwards 0° to 360°), [degrees] T : float, optional Temperature of atmosphere at ground level, [K] P : float, optional Pressure of atmosphere at ground level, [Pa] solar_constant : float, optional The amount of solar radiation which reaches earth's disk (at a standardized distance of 1 AU); this constant is independent of activity or conditions on earth, but will vary throughout the sun's lifetime and may increase or decrease slightly due to solar activity, [W/m^2] atmos_refract : float, optional Atmospheric refractivity at sunrise/sunset (0.5667 deg is an often used value; this varies substantially and has an impact of a few minutes on when sunrise and sunset is), [degrees] albedo : float, optional The average amount of reflection of the terrain surrounding the object at quite a distance; this impacts how much sunlight reflected off the ground, gest reflected back off clouds, [-] linke_turbidity : float, optional The amount of pollution/water in the sky versus a perfect clear sky; If not specified, this will be retrieved from a historical grid; typical values are 3 for cloudy, and 7 for severe pollution around a city, [-] extraradiation_method : str, optional The specified method 
to calculate the effect of earth's position on the amount of radiation which reaches earth according to the methods available in the `pvlib` library, [-] airmass_model : str, optional The specified method to calculate the amount of air the sunlight needs to travel through to reach the earth according to the methods available in the `pvlib` library, [-] cache : dict, optional Dictionary to to check for values to use to skip some calculations; `apparent_zenith`, `zenith`, `azimuth` supported, [-] Returns ------- poa_global : float The total irradiance in the plane of the surface, [W/m^2] poa_direct : float The total beam irradiance in the plane of the surface, [W/m^2] poa_diffuse : float The total diffuse irradiance in the plane of the surface, [W/m^2] poa_sky_diffuse : float The sky component of the diffuse irradiance, excluding the impact from the ground, [W/m^2] poa_ground_diffuse : float The ground-sky diffuse irradiance component, [W/m^2] Examples -------- >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0, ... surface_azimuth=180.0) (1065.7621896280812, 945.2656564506323, 120.49653317744884, 95.31535344213178, 25.181179735317063) >>> cache = {'apparent_zenith': 41.099082295767545, 'zenith': 41.11285376417578, 'azimuth': 182.5631874250523} >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0, ... linke_turbidity=3, T=300, P=1E5, ... surface_azimuth=180.0, cache=cache) (1042.5677703677097, 918.2377548545295, 124.33001551318027, 99.6228657378363, 24.70714977534396) At night, there is no solar radiation and this function returns zeros: >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 2, 43, 5), surface_tilt=41.0, ... 
surface_azimuth=180.0) (0.0, -0.0, 0.0, 0.0, 0.0) Notes ----- The retrieval of `linke_turbidity` requires the pytables library (and Pandas); if it is not installed, specify a value of `linke_turbidity` to avoid the dependency. There is some redundancy of the calculated results, according to the following relations. The total irradiance is normally that desired for engineering calculations. poa_diffuse = poa_ground_diffuse + poa_sky_diffuse poa_global = poa_direct + poa_diffuse FOr a surface such as a pipe or vessel, an approach would be to split it into a number of rectangles and sum up the radiation absorbed by each. This calculation is fairly slow. References ---------- .. [1] Will Holmgren, Calama-Consulting, Tony Lorenzo, Uwe Krien, bmu, DaCoEx, mayudong, et al. Pvlib/Pvlib-Python: 0.5.1. Zenodo, 2017. https://doi.org/10.5281/zenodo.1016425.
def setPositionLinkedTo(self, widgets):
    """
    Sets the widget(s) whose positional changes this popup will follow.

    When a single widget is given, the popup installs itself as an event
    filter on that widget and on every one of its ancestors; when a
    sequence is given, it is used as the linked list directly.

    :param      widgets | <QWidget> || [<QWidget>, ..]
    """
    if type(widgets) in (list, set, tuple):
        linked = list(widgets)
    else:
        linked = []
        ancestor = widgets
        while ancestor:
            ancestor.installEventFilter(self)
            linked.append(ancestor)
            ancestor = ancestor.parent()
    self._positionLinkedTo = linked
Sets the widget that this popup will be linked to for positional changes. :param widgets | <QWidget> || [<QWidget>, ..]
def run(command, parser, cl_args, unknown_args):
    '''
    Restart a topology container.

    :param command:
    :param parser:
    :param cl_args:
    :param unknown_args:
    :return:
    '''
    Log.debug("Restart Args: %s", cl_args)
    container_id = cl_args['container-id']
    # server deployments take a dict of extra args; direct deployments a list
    if cl_args['deploy_mode'] == config.SERVER_MODE:
        extra = {"container_id": str(container_id)}
        return cli_helper.run_server(command, cl_args, "restart topology",
                                     extra_args=extra)
    extra = ["--container_id", str(container_id)]
    return cli_helper.run_direct(command, cl_args, "restart topology",
                                 extra_args=extra)
:param command: :param parser: :param cl_args: :param unknown_args: :return:
def compute_cumsum(
    df,
    id_cols: List[str],
    reference_cols: List[str],
    value_cols: List[str],
    new_value_cols: List[str] = None,
    cols_to_keep: List[str] = None
):
    """
    Compute a cumulative sum over groups of columns.

    ---

    ### Parameters

    *mandatory :*
    - `id_cols` (*list*): the columns id to create each group
    - `reference_cols` (*list*): the columns to order the cumsum
    - `value_cols` (*list*): the columns to cumsum

    *optional :*
    - `new_value_cols` (*list*): the new columns with the result cumsum
    - `cols_to_keep` (*list*): other columns to keep in the dataset.
      This option can be used if there is only one row by group
      [id_cols + reference_cols]

    ---

    ### Example

    **Input**

    MONTH | DAY | NAME | VALUE | X
    :---:|:---:|:--:|:---:|:---:
    1|1|A|1|lo
    2|1|A|1|lo
    2|15|A|1|la
    1|15|B|1|la

    ```cson
    compute_cumsum:
      id_cols: ['NAME']
      reference_cols: ['MONTH', 'DAY']
      cumsum_cols: ['VALUE']
      cols_to_keep: ['X']
    ```

    **Output**

    NAME | MONTH | DAY | X | VALUE
    :---:|:---:|:--:|:---:|:---:
    A|1|1|lo|1
    A|2|1|la|2
    A|2|15|lo|3
    B|1|15|la|1
    """
    if new_value_cols is None:
        new_value_cols = value_cols
    if cols_to_keep is None:
        cols_to_keep = []
    if len(new_value_cols) != len(value_cols):
        raise ParamsValueError('`value_cols` and `new_value_cols` needs '
                               'to have the same number of elements')
    check_params_columns_duplicate(
        id_cols + reference_cols + cols_to_keep + value_cols)

    # group rows, then accumulate within each id-group ordered by the index
    group_levels = list(range(len(id_cols)))
    grouped = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
    grouped[new_value_cols] = grouped.groupby(level=group_levels)[value_cols].cumsum()
    return grouped.reset_index()
Compute cumsum for a group of columns. --- ### Parameters *mandatory :* - `id_cols` (*list*): the columns id to create each group - `reference_cols` (*list*): the columns to order the cumsum - `value_cols` (*list*): the columns to cumsum *optional :* - `new_value_cols` (*list*): the new columns with the result cumsum - `cols_to_keep` (*list*): other columns to keep in the dataset. This option can be used if there is only one row by group [id_cols + reference_cols] --- ### Example **Input** MONTH | DAY | NAME | VALUE | X :---:|:---:|:--:|:---:|:---: 1 | 1 | A | 1 | lo 2 | 1 | A | 1 | lo 2 | 15 | A | 1 | la 1 | 15 | B | 1 | la ```cson compute_cumsum: id_cols: ['NAME'] reference_cols: ['MONTH', 'DAY'] cumsum_cols: ['VALUE'] cols_to_keep: ['X'] ``` **Output** NAME | MONTH | DAY | X | VALUE :---:|:---:|:--:|:---:|:---: A | 1 | 1 | lo | 1 A | 2 | 1 | la | 2 A | 2 | 15 | lo | 3 B | 1 | 15 | la | 1
def on_cache_changed(self, direct, which=None):
    """
    Callback fired when elements of some cached inputs change.

    This gets 'hooked up' to the inputs at caching time; when any of
    their elements change, the corresponding `inputs_changed` flags are
    set here so the dependent caches can be refreshed.
    """
    for candidate in (direct, which):
        input_id = self.id(candidate)
        cache_ids = self.cached_input_ids.get(input_id, [None, []])[1]
        for cache_id in cache_ids:
            self.inputs_changed[cache_id] = True
A callback function, which sets local flags when the elements of some cached inputs change. This function gets 'hooked up' to the inputs when we cache them, and upon their elements being changed we update here.
def byte_number_string(
    number, thousandsSep=True, partition=False, base1024=True, appendBytes=True
):
    """Convert a byte count into a human-readable representation.

    Args:
        number: the byte count.
        thousandsSep: insert thousands separators into the numeric part.
        partition: reduce the number with a magnitude suffix (K, M, G, ...).
        base1024: with ``partition``, divide by 1024 (bit-shift, truncating);
            otherwise divide by 1000 (keeping a float).
        appendBytes: append " Byte"/" Bytes" to the result.

    Returns:
        str: e.g. ``"1,234,567 Bytes"``, ``"2K Bytes"`` or ``"1.5M Bytes"``.
    """
    magsuffix = ""
    bytesuffix = ""
    if partition:
        magnitude = 0
        if base1024:
            while number >= 1024:
                magnitude += 1
                number = number >> 10
        else:
            while number >= 1000:
                magnitude += 1
                number /= 1000.0
        # TODO: use "9 KB" instead of "9K Bytes"?
        # TODO use 'kibi' for base 1024?
        # http://en.wikipedia.org/wiki/Kibi-#IEC_standard_prefixes
        # extended with E/Z/Y so huge inputs no longer raise IndexError
        magsuffix = ["", "K", "M", "G", "T", "P", "E", "Z", "Y"][magnitude]
    if appendBytes:
        bytesuffix = " Byte" if number == 1 else " Bytes"
    if thousandsSep and (number >= 1000 or magsuffix):
        # fix: "{:,d}" raised ValueError when base-1000 partitioning left a
        # float (e.g. 1.5); "{:,}" groups ints and formats floats correctly
        snum = "{:,}".format(number)
    else:
        snum = str(number)
    return "{}{}{}".format(snum, magsuffix, bytesuffix)
Convert bytes into human-readable representation.
def extend_name(suffix):
    """Build a class decorator that appends ``suffix`` to the class name.

    Example:

        @extend_name('_Foo')
        class Class:
            pass

        assert Class.__name__ == 'Class_Foo'
    """
    def decorator(cls):
        cls.__name__ = '{}{}'.format(cls.__name__, suffix)
        return cls
    return decorator
A factory for class decorators that modify the class name by appending some text to it. Example: @extend_name('_Foo') class Class: pass assert Class.__name__ == 'Class_Foo'
def _write_local_data_files(self, cursor):
    """
    Write the rows of ``cursor`` to local temporary files as
    newline-delimited JSON.

    :return: A dictionary where keys are filenames to be used as object
        names in GCS, and values are file handles to local files that
        contain the data for the GCS objects.
    """
    # column names with spaces replaced so they are valid BQ field names
    schema = [field[0].replace(' ', '_') for field in cursor.description]
    file_no = 0
    current_handle = NamedTemporaryFile(delete=True)
    handles = {self.filename.format(file_no): current_handle}

    for row in cursor:
        converted = map(self.convert_types, row)
        line = json.dumps(dict(zip(schema, converted)), sort_keys=True)
        current_handle.write(line.encode('utf-8'))
        # newline-delimited records, as BigQuery expects
        current_handle.write(b'\n')

        # roll over to a new file once the size limit is reached
        if current_handle.tell() >= self.approx_max_file_size_bytes:
            file_no += 1
            current_handle = NamedTemporaryFile(delete=True)
            handles[self.filename.format(file_no)] = current_handle

    return handles
Takes a cursor, and writes results to a local file. :return: A dictionary where keys are filenames to be used as object names in GCS, and values are file handles to local files that contain the data for the GCS objects.
def fsqrt(q):
    '''
    Decompose a non-negative fraction q into a pair (a, b) such that
    q == a * a * b, where b is a square-free integer. When q is a
    perfect square, a is its square root and b is one.
    '''
    if q == 0:
        return q, 1
    if q < 0:
        raise ValueError('math domain error %s' % q)
    num_root, num_free = isqrt(q.numerator)
    den_root, den_free = isqrt(q.denominator)
    # q == (num_root/den_root)**2 * (num_free/den_free)
    #   == (num_root/(den_root*den_free))**2 * num_free*den_free
    return Fraction(num_root, den_root * den_free), num_free * den_free
given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one.
def get_page_objects_by_ext_type(context, object_type):
    """
    Look up the selected objects of ``object_type`` from the page's
    extension content in ``context``.

    **Arguments**

    ``object_type``
        object type

    :return selected objects
    """
    try:
        return context['page']['ext_content'][object_type]
    except KeyError:
        # unknown content type -> surface a template error
        raise template.TemplateSyntaxError('wrong content type: {0:>s}'.format(object_type))
**Arguments** ``object_type`` object type :return selected objects
def patch_traces(
    self,
    project_id,
    traces,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Sends new traces to Stackdriver Trace or updates existing traces. If the ID
    of a trace that you send matches that of an existing trace, any fields
    in the existing trace and its spans are overwritten by the provided values,
    and any new fields provided are merged with the existing trace data. If the
    ID does not match, a new trace is created.

    Example:
        >>> from google.cloud import trace_v1
        >>>
        >>> client = trace_v1.TraceServiceClient()
        >>>
        >>> # TODO: Initialize `project_id`:
        >>> project_id = ''
        >>>
        >>> # TODO: Initialize `traces`:
        >>> traces = {}
        >>>
        >>> client.patch_traces(project_id, traces)

    Args:
        project_id (str): ID of the Cloud project where the trace data is stored.
        traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message.

            If a dict is provided, it must be of the same form as the protobuf
            message :class:`~google.cloud.trace_v1.types.Traces`
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped callable is cached so it is only built on first use.
    if "patch_traces" not in self._inner_api_calls:
        self._inner_api_calls[
            "patch_traces"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.patch_traces,
            default_retry=self._method_configs["PatchTraces"].retry,
            default_timeout=self._method_configs["PatchTraces"].timeout,
            client_info=self._client_info,
        )

    request = trace_pb2.PatchTracesRequest(project_id=project_id, traces=traces)
    if metadata is None:
        metadata = []
    # Copy so the caller's sequence is never mutated below.
    metadata = list(metadata)
    # Attach a gRPC routing header carrying the project id (generated
    # boilerplate; the except clause guards attribute errors during header
    # construction).
    try:
        routing_header = [("project_id", project_id)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    self._inner_api_calls["patch_traces"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created. Example: >>> from google.cloud import trace_v1 >>> >>> client = trace_v1.TraceServiceClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `traces`: >>> traces = {} >>> >>> client.patch_traces(project_id, traces) Args: project_id (str): ID of the Cloud project where the trace data is stored. traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.trace_v1.types.Traces` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def check_lengths(*arrays):
    """
    tool to ensure input and output data have the same number of samples

    Parameters
    ----------
    *arrays : iterable of arrays to be checked

    Returns
    -------
    None
    """
    sizes = [len(a) for a in arrays]
    # More than one distinct length means the arrays are inconsistent.
    if len(np.unique(sizes)) > 1:
        raise ValueError('Inconsistent data lengths: {}'.format(sizes))
tool to ensure input and output data have the same number of samples Parameters ---------- *arrays : iterable of arrays to be checked Returns ------- None
def apply_update(self, doc, update_spec):
    """Override DocManagerBase.apply_update to have flat documents."""
    # Whole-document replacement: update_spec *is* the new document.
    if '$set' not in update_spec and '$unset' not in update_spec:
        # Carry the unique key over so Solr updates the right document.
        update_spec['_id'] = doc[self.unique_key]
        return update_spec

    def _pop_dotted(prefix):
        # Drop ``prefix`` itself and every flattened sub-field
        # (``prefix.x``, ``prefix.y.z``, ...) from the document.
        stale = [k for k in doc
                 if k.startswith(prefix)
                 and (k == prefix or k[len(prefix)] == '.')]
        for k in stale:
            doc.pop(k)

    for field, value in update_spec.get("$set", {}).items():
        _pop_dotted(field)
        doc[field] = value

    for field in update_spec.get("$unset", []):
        # MongoDB < 2.5.2 reports $unset for fields that don't exist within
        # the document being updated.
        _pop_dotted(field)

    return doc
Override DocManagerBase.apply_update to have flat documents.
def register(cls, use_admin=True):
    """Register with the API a :class:`sandman.model.Model` class and
    associated endpoint.

    :param cls: User-defined class derived from :class:`sandman.model.Model` to
                be registered with the endpoint returned by :func:`endpoint()`
    :type cls: :class:`sandman.model.Model` or tuple
    """
    with app.app_context():
        # Lazily create the per-app registry of known model classes.
        if getattr(current_app, 'class_references', None) is None:
            current_app.class_references = {}
        # Accept a single class or a collection of classes uniformly.
        models = cls if isinstance(cls, (list, tuple)) else (cls,)
        for model in models:
            register_internal_data(model)
            model.use_admin = use_admin
Register with the API a :class:`sandman.model.Model` class and associated endpoint. :param cls: User-defined class derived from :class:`sandman.model.Model` to be registered with the endpoint returned by :func:`endpoint()` :type cls: :class:`sandman.model.Model` or tuple
def get_owner(self, default=True):
    """Return (User ID, Group ID) tuple

    :param bool default: Whether to return default if not set.

    :rtype: tuple[int, int]
    """
    uid, gid = self.owner
    if default:
        # Fall back to the current process owner for unset (falsy) values.
        uid = uid or os.getuid()
        gid = gid or os.getgid()
    return uid, gid
Return (User ID, Group ID) tuple :param bool default: Whether to return default if not set. :rtype: tuple[int, int]
def is_email():
    """
    Validates that a field's value is a valid email address.

    :return: a ``validate(value)`` callable that returns ``None`` for a
        valid address and an error (via the module's ``e`` helper) otherwise.
    """
    # NOTE: the original used ``ur'...'`` prefixes, which are a SyntaxError
    # on Python 3.  The pattern is pure ASCII, so plain raw strings are
    # equivalent; re.UNICODE is still passed explicitly below.
    email = (
        r'(?!^\.)'     # No dot at start
        r'(?!.*\.@)'   # No dot before at sign
        r'(?!.*@\.)'   # No dot after at sign
        r'(?!.*\.$)'   # No dot at the end
        r'(?!.*\.\.)'  # No double dots anywhere
        r'^\S+'        # Starts with one or more non-whitespace characters
        r'@'           # Contains an at sign
        r'\S+$'        # Ends with one or more non-whitespace characters
    )
    regex = re.compile(email, re.IGNORECASE | re.UNICODE)

    def validate(value):
        if not regex.match(value):
            return e("{} is not a valid email address", value)

    return validate
Validates that a field's value is a valid email address.
def update_user(self, user_id, **kwargs):
    """
    Updates a user.

    :param      user_id: The unique ID of the user.
    :type       user_id: ``str``
    """
    # Translate snake_case keyword names to the API's camelCase.
    properties = {
        self._underscore_to_camelcase(attr): value
        for attr, value in kwargs.items()
    }

    data = {"properties": properties}

    return self._perform_request(
        url='/um/users/%s' % user_id,
        method='PUT',
        data=json.dumps(data))
Updates a user. :param user_id: The unique ID of the user. :type user_id: ``str``
def build_tree(self, *args, **kwargs):
    """Dispatch a tree build call. Note that you need at least four taxa to
    express some evolutionary history on an unrooted tree.

    :param algorithm: optional keyword, ``'raxml'`` (default) or
        ``'fasttree'``; selects the tree-building backend.
    """
    # Check length #
    # assert len(self) > 3
    # Default option #
    # BUG FIX: the original called ``kwargs.pop(kwargs, None)``, which tries
    # to hash the dict itself and raised TypeError whenever kwargs was
    # non-empty; the intent is to pop the 'algorithm' keyword.
    algorithm = kwargs.pop('algorithm', None)
    if algorithm is None:
        algorithm = 'raxml'
    # Dispatch #
    # Use ``==`` rather than ``is``: string identity is an implementation
    # detail and not guaranteed for runtime-built strings.
    if algorithm == 'raxml':
        return self.build_tree_raxml(*args, **kwargs)
    if algorithm == 'fasttree':
        return self.build_tree_fast(*args, **kwargs)
Dispatch a tree build call. Note that you need at least four taxa to express some evolutionary history on an unrooted tree.
def mqtt_connected(func):
    """
    MQTTClient coroutines decorator which will wait until connection before
    calling the decorated method.

    :param func: coroutine to be called once connected
    :return: coroutine result
    """
    @asyncio.coroutine
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self._connected_state.is_set():
            base_logger.warning("Client not connected, waiting for it")
            # Wait until either the client connects or it permanently gives
            # up reconnecting, whichever happens first.
            _, pending = yield from asyncio.wait(
                [self._connected_state.wait(), self._no_more_connections.wait()],
                return_when=asyncio.FIRST_COMPLETED)
            # Cancel whichever wait did not complete so no task leaks.
            for t in pending:
                t.cancel()
            if self._no_more_connections.is_set():
                raise ClientException("Will not reconnect")
        return (yield from func(self, *args, **kwargs))
    return wrapper
MQTTClient coroutines decorator which will wait until connection before calling the decorated method. :param func: coroutine to be called once connected :return: coroutine result
def is_dot(ip):
    """Return true if the IP address is in dotted decimal notation."""
    parts = str(ip).split('.')
    if len(parts) != 4:
        return False
    for part in parts:
        try:
            octet = int(part)
        except ValueError:
            return False
        if not 0 <= octet <= 255:
            return False
    return True
Return true if the IP address is in dotted decimal notation.
def export(self, file_path=None, export_format=None):
    """
    Write the users to a file.

    :param file_path: destination path for the export file.
    :param export_format: ``'yaml'`` or ``'json'``; any other value writes
        nothing (the file is still created/truncated).
    :return: always ``True``.
    """
    with io.open(file_path, mode='w', encoding="utf-8") as export_file:
        if export_format == 'yaml':
            import yaml
            yaml.safe_dump(self.to_dict(), export_file,
                           default_flow_style=False)
        elif export_format == 'json':
            # BUG FIX: the original wrapped the dump in six's ``text_type``,
            # an undefined name here; json.dumps already returns text.
            export_file.write(json.dumps(self.to_dict(), ensure_ascii=False))
    return True
Write the users to a file.
def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):
    """Plot an histogram from the data and return the AxesSubplot object.

    Parameters
    ----------
    series : Series
        The data to plot
    bins : int
        Number of histogram bins, default 10
    figsize : tuple
        The size of the figure (width, height) in inches, default (6,4)
    facecolor : str
        The color code.

    Returns
    -------
    matplotlib.AxesSubplot
        The plot.
    """
    # Date columns need a manually built figure; other types can use the
    # pandas plotting shortcut directly.
    if base.get_vartype(series) == base.TYPE_DATE:
        # TODO: These calls should be merged
        fig = plt.figure(figsize=figsize)
        plot = fig.add_subplot(111)
        plot.set_ylabel('Frequency')
        try:
            plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)
        except TypeError:
            # matplotlib 1.4 can't plot dates so will show empty plot instead
            pass
    else:
        plot = series.plot(kind='hist', figsize=figsize,
                           facecolor=facecolor,
                           bins=bins)
    # TODO when running on server, send this off to a different thread
    return plot
Plot an histogram from the data and return the AxesSubplot object. Parameters ---------- series : Series The data to plot figsize : tuple The size of the figure (width, height) in inches, default (6,4) facecolor : str The color code. Returns ------- matplotlib.AxesSubplot The plot.
def save_performance(db, job_id, records):
    """
    Save in the database the performance information about the given job.

    :param db: a :class:`openquake.server.dbapi.Db` instance
    :param job_id: a job ID
    :param records: a list of performance records
    """
    # NB: rec['counts'] is a numpy.uint64 which is not automatically converted
    # into an int in Ubuntu 12.04, so we convert it manually below
    fields = 'job_id operation time_sec memory_mb counts'.split()
    rows = []
    for rec in records:
        rows.append((job_id, rec['operation'], rec['time_sec'],
                     rec['memory_mb'], int(rec['counts'])))
    db.insert('performance', fields, rows)
Save in the database the performance information about the given job. :param db: a :class:`openquake.server.dbapi.Db` instance :param job_id: a job ID :param records: a list of performance records
def needs_sync(self):
    """
    Check if enough time has elapsed to perform a sync().

    A call to sync() should be performed every now and then, no matter what
    has_state_changed() says. This is really just a safety thing to enforce
    consistency in case the state gets messed up.

    :rtype: boolean
    """
    if self.lastforce is None:
        # First call: start the clock now.
        self.lastforce = time.time()
    elapsed = time.time() - self.lastforce
    return elapsed >= self.forceipchangedetection_sleep
Check if enough time has elapsed to perform a sync(). A call to sync() should be performed every now and then, no matter what has_state_changed() says. This is really just a safety thing to enforce consistency in case the state gets messed up. :rtype: boolean
def unique_cpx_roots(rlist, tol=0.001):
    """
    Collapse a list of (possibly complex) roots into unique roots and their
    multiplicities.  Roots closer than ``tol`` are grouped; the stored value
    is the running average of the group, so the average of the root values
    is used when multiplicity is greater than one.

    :param rlist: sequence of root values (at least one element)
    :param tol: absolute tolerance used to group nearby roots
    :return: ``(unique_roots, multiplicities)`` as numpy arrays

    Mark Wickert October 2016
    """
    uniq = [rlist[0]]
    mult = [1]
    for root in rlist[1:]:
        for m, (u, count) in enumerate(zip(uniq, mult)):
            if abs(root - u) <= tol:
                # Existing group: bump the count and update the running mean.
                mult[m] = count + 1
                uniq[m] = (u * count + root) / float(mult[m])
                break
        else:
            # BUG FIX: the original appended every root unconditionally,
            # duplicating roots that had already matched an existing group.
            # Only start a new unique root when no group is within tol.
            uniq.append(root)
            mult.append(1)
    return np.array(uniq), np.array(mult)
Collapse a list of roots into unique values with multiplicities; the average of the root values is used when the multiplicity is greater than one. Mark Wickert October 2016
def xdg_config_dir():
    '''
    Check xdg locations for config files
    '''
    # Honour XDG_CONFIG_HOME; fall back to the spec's ~/.config default.
    config_home = os.getenv('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
    return os.path.join(config_home, 'salt')
Check xdg locations for config files
def set_generator_training_nb(self, number):
    """
    sets self.samples_per_epoch which is used in model.fit if input is a
    generator

    :param number: requested number of samples per epoch
    :return: None
    """
    self.samples_per_epoch = number
    remainder = number % self.get_batch_size()
    # Round up to the next full batch so an epoch only sees whole batches.
    if remainder > 0:
        self.samples_per_epoch += self.get_batch_size() - remainder
sets self.samples_per_epoch which is used in model.fit if input is a generator :param number: :return:
def get_input(prompt, check, *, redo_prompt=None, repeat_prompt=False):
    """
    Ask the user to input something on the terminal level, check their
    response and ask again if they didn't answer correctly.

    :param prompt: text shown before the list of accepted answers
    :param check: a callable returning truthy for a valid response, or a
        string / tuple of accepted responses
    :param redo_prompt: optional prompt used after an invalid response
    :param repeat_prompt: if True, reuse ``prompt`` after invalid responses
    :return: the validated response string
    """
    if isinstance(check, str):
        check = (check,)

    # BUG FIX: the original iterated ``check`` to build the option list even
    # when it was a callable, which raised TypeError; only tuples carry a
    # displayable option list.
    if callable(check):
        def _checker(r):
            return check(r)
    elif isinstance(check, tuple):
        # Show falsy options as '' so they remain visible in the prompt.
        shown = [str(item) if item else "''" for item in check]
        prompt += " [{}]: ".format('/'.join(shown))

        def _checker(r):
            return r in check
    else:
        raise ValueError(RESPONSES_ERROR.format(type(check)))

    if repeat_prompt:
        redo_prompt = prompt
    elif not redo_prompt:
        redo_prompt = "Incorrect input, please choose from {}: " \
                      "".format(str(check))

    response = input(prompt)
    while not _checker(response):
        # NOTE: a leftover debug ``print(response, type(response))`` was
        # removed here; it echoed every rejected answer to the terminal.
        response = input(redo_prompt if redo_prompt else prompt)
    return response
Ask the user to input something on the terminal level, check their response and ask again if they didn't answer correctly
def gauge(self, name, value, rate=1):
    # type: (str, float, float) -> None
    """Send a Gauge metric with the specified value"""
    # Rate-based sampling: bail out early when this sample is skipped.
    if not self._should_send_metric(name, rate):
        return
    if not is_numeric(value):
        # Coerce e.g. numeric strings; float() raises on garbage input.
        value = float(value)
    metric = Gauge(self._create_metric_name_for_request(name), value, rate)
    self._request(metric.to_request())
Send a Gauge metric with the specified value
def addElement(self, etype='hex8',
               corners=[-1.0, -1.0, -1.0, 1., -1.0, -1.0, 1.0, 1.0, -1.0,
                        -1.0, 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0,
                        1.0, 1.0, 1.0, -1.0, 1.0, 1.0],
               name='new_elem'):
    '''
    Append a new element (and its nodes) to the mesh, registering matching
    element and node sets named 'e<name>' and 'n<name>'.

    corners - list of nodal coordinates properly ordered for element type
    (counter clockwise); flat [x0, y0, z0, x1, y1, z1, ...] triples.

    NOTE(review): the default ``corners`` list is a shared mutable default;
    safe only as long as callers never mutate it in place -- confirm.
    '''
    # Continue numbering from the last existing element and node.
    lastelm = self.elements[-1][1]
    lastnode = self.nodes[-1][0]
    elm = [etype, lastelm + 1]
    # One node per coordinate triple.
    for i in range(old_div(len(corners), 3)):
        elm.append(lastnode + 1 + i)
    self.elements.append(elm)
    self.elsets['e' + name] = {}
    self.elsets['e' + name][int(elm[1])] = True
    cnt = 1
    self.nsets['n' + name] = []
    for i in range(0, len(corners), 3):
        self.nodes.append([lastnode + cnt, corners[i], corners[i + 1],
                           corners[i + 2]])
        self.nsets['n' + name].append(lastnode + cnt)
        cnt += 1
    # if this is a quad4 or tri3 element make a surface set
    if etype == 'quad4' or etype == 'tri3':
        self.fsets['f' + name] = [[etype, MeshDef.facetID, lastnode + 1,
                                   lastnode + 2, lastnode + 3, lastnode + 4]]
        # Facet IDs are allocated from a class-level counter.
        MeshDef.facetID += 1
corners - list of nodal coordinates properly ordered for element type (counter clockwise)
def first_produced_mesh(self):
    """The first produced mesh.

    :return: the first produced mesh
    :rtype: knittingpattern.Mesh.Mesh
    :raises IndexError: if no mesh is produced

    .. seealso:: :attr:`number_of_produced_meshes`
    """
    producing = (i for i in self.instructions if i.produces_meshes())
    try:
        return next(producing).first_produced_mesh
    except StopIteration:
        raise IndexError("{} produces no meshes".format(self))
The first produced mesh. :return: the first produced mesh :rtype: knittingpattern.Mesh.Mesh :raises IndexError: if no mesh is produced .. seealso:: :attr:`number_of_produced_meshes`
def pinyin_to_ipa(s):
    """Convert all Pinyin syllables in *s* to IPA.

    Spaces are added between connected syllables and syllable-separating
    apostrophes are removed.
    """
    # Delegate to the module-level _convert helper, matching candidates with
    # zhon's Pinyin syllable regex and converting each via
    # pinyin_syllable_to_ipa (both defined elsewhere in this module).
    return _convert(s, zhon.pinyin.syllable, pinyin_syllable_to_ipa,
                    remove_apostrophes=True, separate_syllables=True)
Convert all Pinyin syllables in *s* to IPA. Spaces are added between connected syllables and syllable-separating apostrophes are removed.
def pre_release(self):
    """ Return true if version is a pre-release. """
    info = self.version_info
    # A pre-release must carry both a label and a pre counter.
    return info.get('label') is not None and info.get('pre') is not None
Return true if version is a pre-release.
def unsubscribe(self, coro):
    """
    Unsubscribe from status updates from the Opentherm Gateway.
    Can only be used after connect()

    @coro is a coroutine which has been subscribed with subscribe()
    earlier.

    Return True on success, false if not connected or subscribed.
    """
    try:
        self._notify.remove(coro)
    except ValueError:
        # Not subscribed (or already removed).
        return False
    return True
Unsubscribe from status updates from the Opentherm Gateway. Can only be used after connect() @coro is a coroutine which has been subscribed with subscribe() earlier. Return True on success, false if not connected or subscribed.
def send_start(remote, code, device=None, address=None):
    """
    All parameters are passed to irsend. See the man page for irsend
    for details about their usage.

    Parameters
    ----------
    remote: str
    code: str
    device: str
    address: str

    Notes
    -----
    No attempt is made to catch or handle errors.  See the documentation
    for subprocess.check_output to see the types of exceptions it may
    raise.
    """
    # Delegate to the shared irsend invocation helper.
    _call(['send_start', remote, code], device, address)
All parameters are passed to irsend. See the man page for irsend for details about their usage. Parameters ---------- remote: str code: str device: str address: str Notes ----- No attempt is made to catch or handle errors. See the documentation for subprocess.check_output to see the types of exceptions it may raise.
def p_parens_expr(p):
    # NOTE: the docstring below is functional -- PLY reads it as the grammar
    # rule for this production; do not reword it.
    """
    expr : LPAREN expr RPAREN
    """
    # Wrap the inner expression in an explicit "parens" AST node.
    p[0] = node.expr(op="parens", args=node.expr_list([p[2]]))
expr : LPAREN expr RPAREN
def get_number_of_partitions_for(self, ar):
    """Return the number of selected partitions
    """
    # The form posts the selected count keyed by the AR's UID.
    uid = api.get_uid(ar)
    num = self.request.get("primary", {}).get(uid)
    if num is None:
        # Nothing in the request: fall back to the template's partition
        # count, or the global default when no template is assigned.
        template = ar.getTemplate()
        if template:
            num = len(template.getPartitions())
        else:
            num = DEFAULT_NUMBER_OF_PARTITIONS
    try:
        return int(num)
    except (TypeError, ValueError):
        return DEFAULT_NUMBER_OF_PARTITIONS
Return the number of selected partitions
def _sort(values, axis=-1, direction='ASCENDING', stable=False, name=None): # pylint: disable=unused-argument """Numpy implementation of `tf.sort`.""" if direction == 'ASCENDING': pass elif direction == 'DESCENDING': values = np.negative(values) else: raise ValueError('Unrecognized direction: {}.'.format(direction)) result = np.sort(values, axis, kind='stable' if stable else 'quicksort') if direction == 'DESCENDING': return np.negative(result) return result
Numpy implementation of `tf.sort`.
def rescan(self):
    """Checks files and directories on watchlist for updates, rescans them
    for new data products. If any are found, returns them. Skips those in
    directories whose watchingState is set to Purr.UNWATCHED.
    """
    if not self.attached:
        return
    dprint(5, "starting rescan")
    newstuff = {};  # this accumulates names of new or changed files. Keys are paths, values are 'quiet' flag.
    # store timestamp of scan
    self.last_scan_timestamp = time.time()
    # go through watched files/directories, check for mtime changes
    for path, watcher in list(self.watchers.items()):
        # get list of new files from watcher
        newfiles = watcher.newFiles()
        # None indicates access error, so drop it from watcher set
        if newfiles is None:
            if watcher.survive_deletion:
                dprintf(5, "access error on %s, but will still be watched\n", watcher.path)
            else:
                dprintf(2, "access error on %s, will no longer be watched\n", watcher.path)
                del self.watchers[path]
            # Notify listeners exactly once per disappearance.
            if not watcher.disappeared:
                self.emit(SIGNAL("disappearedFile"), path)
                watcher.disappeared = True
            continue
        dprintf(5, "%s: %d new file(s)\n", watcher.path, len(newfiles))
        # if a file has its own watcher, and is independently reported by a directory watcher, skip the directory's
        # version and let the file's watcher report it. Reason for this is that the file watcher may have a more
        # up-to-date timestamp, so we trust it over the dir watcher.
        newfiles = [p for p in newfiles if p is path or p not in self.watchers]
        # skip files in self._unwatched_paths
        newfiles = [filename for filename in newfiles
                    if self._watching_state.get(os.path.dirname(filename)) > Purr.UNWATCHED]
        # Now go through files and add them to the newstuff dict
        for newfile in newfiles:
            # if quiet flag is explicitly set on watcher, enforce it
            # if not pouncing on directory, also add quietly
            if watcher.quiet or self._watching_state.get(os.path.dirname(newfile)) < Purr.POUNCE:
                quiet = True
            # else add quietly if file is not in the quiet patterns
            else:
                quiet = matches_patterns(os.path.basename(newfile), self._quiet_patterns)
            # add file to list of new products. Since a file may be reported by multiple
            # watchers, make the quiet flag a logical AND of all the quiet flags (i.e. DP will be
            # marked as quiet only if all watchers report it as quiet).
            newstuff[newfile] = quiet and newstuff.get(newfile, True)
            dprintf(4, "%s: new data product, quiet=%d (watcher quiet: %s)\n",
                    newfile, quiet, watcher.quiet)
            # add a watcher for this file to the temp_watchers list. this is used below
            # to detect renamed and deleted files
            self.temp_watchers[newfile] = Purrer.WatchedFile(newfile)
    # now, go through temp_watchers to see if any newly pounced-on files have disappeared
    for path, watcher in list(self.temp_watchers.items()):
        # get list of new files from watcher
        if watcher.newFiles() is None:
            dprintf(2, "access error on %s, marking as disappeared", watcher.path)
            del self.temp_watchers[path]
            self.emit(SIGNAL("disappearedFile"), path)
    # if we have new data products, send them to the main window
    return self.makeDataProducts(iter(newstuff.items()))
Checks files and directories on watchlist for updates, rescans them for new data products. If any are found, returns them. Skips those in directories whose watchingState is set to Purr.UNWATCHED.
def inspect_image(self, image_name, image_tag=''):
    '''
        a method to retrieve the settings of an image

    :param image_name: string with name or id of image
    :param image_tag: [optional] string with tag associated with image
    :return: dictionary of settings of image

    { TOO MANY TO LIST }
    '''
    title = '%s.inspect_image' % self.__class__.__name__

    # validate inputs
    for key, value in (('image_name', image_name), ('image_tag', image_tag)):
        if value:
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # determine system command argument
    sys_arg = image_name
    if image_tag:
        sys_arg = '%s:%s' % (image_name, image_tag)

    # run inspect command
    import json
    output_dict = json.loads(self.command('docker inspect %s' % sys_arg))
    # docker inspect returns a one-element list of settings dicts.
    return output_dict[0]
a method to retrieve the settings of an image :param image_name: string with name or id of image :param image_tag: [optional] string with tag associated with image :return: dictionary of settings of image { TOO MANY TO LIST }
def _ceil(self, address): """ Returns the smallest page boundary value not less than the address. :rtype: int :param address: the address to calculate its ceil. :return: the ceil of C{address}. """ return (((address - 1) + self.page_size) & ~self.page_mask) & self.memory_mask
Returns the smallest page boundary value not less than the address. :rtype: int :param address: the address to calculate its ceil. :return: the ceil of C{address}.
def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:
    """
    Merge current ontology with input master config.

    :param config: master config, should be str or dict
    :param defaults: a dict that sets default color and icon
    :param delete_orphan_fields: if a property doesn't exist in the ontology then delete it
    :return: merged master config in dict
    """
    # Accept a JSON string as well as an already-parsed dict.
    if isinstance(config, str):
        import json
        config = json.loads(config)
    properties = self.all_properties()
    config['fields'] = config.get('fields', dict())
    fields = config['fields']
    d_color = defaults.get('color', 'white')
    d_icon = defaults.get('icon', 'icons:default')

    if delete_orphan_fields:
        # Drop config fields that no longer correspond to an ontology property.
        exist = {p.name() for p in properties}
        unexist = set(fields.keys()) - exist
        for name in unexist:
            del fields[name]

    for p in properties:
        # Existing field entries are kept; otherwise start from this
        # default field template.
        field = fields.get(p.name(), {'show_in_search': False,
                                      'combine_fields': False,
                                      'number_of_rules': 0,
                                      'glossaries': [],
                                      'use_in_network_search': False,
                                      'case_sensitive': False,
                                      'show_as_link': 'text',
                                      'blacklists': [],
                                      'show_in_result': 'no',
                                      'rule_extractor_enabled': False,
                                      'search_importance': 1,
                                      'group_name': '',
                                      'show_in_facets': False,
                                      'predefined_extractor': 'none',
                                      'rule_extraction_target': ''})
        config['fields'][p.name()] = field
        # Always refresh label/description/name from the ontology.
        field['screen_label'] = ' '.join(p.label())
        field['description'] = '\n'.join(p.definition())
        field['name'] = p.name()
        # color: inherit from the closest ancestor, else the default
        if 'color' not in field:
            color = self.__merge_close_ancestor_color(p, fields, attr='color')
            field['color'] = color if color else d_color
        # icon: same inheritance rule as color
        if 'icon' not in field:
            icon = self.__merge_close_ancestor_color(p, fields, attr='icon')
            field['icon'] = icon if icon else d_icon
        # type: object properties link to other KG entities; data
        # properties map their first XSD range, or None when unranged
        if isinstance(p, OntologyObjectProperty):
            field['type'] = 'kg_id'
        else:
            try:
                field['type'] = self.__merge_xsd_to_type(
                    next(iter(p.included_ranges())))
            except StopIteration:
                field['type'] = None
    return config
Merge current ontology with input master config. :param config: master config, should be str or dict :param defaults: a dict that sets default color and icon :param delete_orphan_fields: if a property doesn't exist in the ontology then delete it :return: merged master config in dict
def new_workspace(self, name=None, layout=None, workspace_id=None, index=None) -> WorkspaceLayout.WorkspaceLayout:
    """ Create a new workspace, insert into document_model, and return it. """
    workspace = WorkspaceLayout.WorkspaceLayout()
    # Append by default; honour an explicit insertion index when given.
    position = index if index is not None else len(self.document_model.workspaces)
    self.document_model.insert_workspace(position, workspace)
    # Default layout: a single selected image panel.
    default_layout = create_image_desc()
    default_layout["selected"] = True
    workspace.layout = default_layout if layout is None else layout
    workspace.name = name if name is not None else _("Workspace")
    if workspace_id:
        workspace.workspace_id = workspace_id
    return workspace
Create a new workspace, insert into document_model, and return it.
def decode_list(input_props, name):
    # type: (Dict[str, str], str) -> List[str]
    """ Decodes a space-separated list """
    raw = input_props.get(name, None)
    # Missing or empty values decode to an empty list.
    return raw.split(" ") if raw else []
Decodes a space-separated list
def shadowUpdate(self, srcJSONPayload, srcCallback, srcTimeout): """ **Description** Update the device shadow JSON document string from AWS IoT by publishing the provided JSON document to the corresponding shadow topics. Shadow response topics will be subscribed to receive responses from AWS IoT regarding the result of the get operation. Response will be available in the registered callback. If no response is received within the provided timeout, a timeout notification will be passed into the registered callback. **Syntax** .. code:: python # Update the shadow JSON document from AWS IoT, with a timeout set to 5 seconds BotShadow.shadowUpdate(newShadowJSONDocumentString, customCallback, 5) **Parameters** *srcJSONPayload* - JSON document string used to update shadow JSON document in AWS IoT. *srcCallback* - Function to be called when the response for this shadow request comes back. Should be in form :code:`customCallback(payload, responseStatus, token)`, where :code:`payload` is the JSON document returned, :code:`responseStatus` indicates whether the request has been accepted, rejected or is a delta message, :code:`token` is the token used for tracing in this request. *srcTimeout* - Timeout to determine whether the request is invalid. When a request gets timeout, a timeout notification will be generated and put into the registered callback to notify users. **Returns** The token used for tracing in this shadow request. 
""" # Validate JSON self._basicJSONParserHandler.setString(srcJSONPayload) if self._basicJSONParserHandler.validateJSON(): with self._dataStructureLock: # clientToken currentToken = self._tokenHandler.getNextToken() self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["update", currentToken]) self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) JSONPayloadWithToken = self._basicJSONParserHandler.regenerateString() # Update callback data structure self._shadowSubscribeCallbackTable["update"] = srcCallback # Update number of pending feedback self._shadowSubscribeStatusTable["update"] += 1 # Two subscriptions if not self._isPersistentSubscribe or not self._isUpdateSubscribed: self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "update", self.generalCallback) self._isUpdateSubscribed = True self._logger.info("Subscribed to update accepted/rejected topics for deviceShadow: " + self._shadowName) # One publish self._shadowManagerHandler.basicShadowPublish(self._shadowName, "update", JSONPayloadWithToken) # Start the timer self._tokenPool[currentToken].start() else: raise ValueError("Invalid JSON file.") return currentToken
**Description** Update the device shadow JSON document string from AWS IoT by publishing the provided JSON document to the corresponding shadow topics. Shadow response topics will be subscribed to receive responses from AWS IoT regarding the result of the get operation. Response will be available in the registered callback. If no response is received within the provided timeout, a timeout notification will be passed into the registered callback. **Syntax** .. code:: python # Update the shadow JSON document from AWS IoT, with a timeout set to 5 seconds BotShadow.shadowUpdate(newShadowJSONDocumentString, customCallback, 5) **Parameters** *srcJSONPayload* - JSON document string used to update shadow JSON document in AWS IoT. *srcCallback* - Function to be called when the response for this shadow request comes back. Should be in form :code:`customCallback(payload, responseStatus, token)`, where :code:`payload` is the JSON document returned, :code:`responseStatus` indicates whether the request has been accepted, rejected or is a delta message, :code:`token` is the token used for tracing in this request. *srcTimeout* - Timeout to determine whether the request is invalid. When a request gets timeout, a timeout notification will be generated and put into the registered callback to notify users. **Returns** The token used for tracing in this shadow request.
def secret_file(filename):
    """Verify that a file meant to hold secrets is safe to use.

    The path must resolve to a regular file or a symlink, and (outside
    Windows) must not be readable or writable by group or others.

    :param filename: path to the candidate secret file
    :raises aomi.exceptions.AomiFile: if the path is not a real
        file/symlink or its permission bits are too permissive
    """
    mode = os.stat(abspath(filename)).st_mode
    if not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
        raise aomi.exceptions.AomiFile(
            "Secret file %s must be a real file or symlink" % filename)

    # Windows does not use POSIX permission bits, so skip the check there
    if platform.system() != "Windows":
        loose_bits = stat.S_IROTH | stat.S_IWOTH | stat.S_IWGRP
        if mode & loose_bits:
            raise aomi.exceptions.AomiFile(
                "Secret file %s has too loose permissions" % filename)
Check that a file which really should be kept secret has appropriately strict permissions
def compile_loaderplugin_entry(self, spec, entry):
    """Dispatch a loader plugin entry to its registered handler.

    The default implementation assumes everything up to the first '!'
    in the module name identifies a loader plugin known to the registry
    stored in the spec under CALMJS_LOADERPLUGIN_REGISTRY.  When no
    handler is registered for the plugin, a warning is logged and empty
    results are returned.
    """
    modname, source, target, modpath = entry
    registry = spec[CALMJS_LOADERPLUGIN_REGISTRY]
    handler = registry.get(modname)
    if not handler:
        logger.warning(
            "no loaderplugin handler found for plugin entry '%s'", modname)
        return {}, {}, []
    return handler(self, spec, modname, source, target, modpath)
Generic loader plugin entry handler. The default implementation assumes that everything up to the first '!' symbol resolves to some known loader plugin within the registry. The registry instance responsible for the resolution of the loader plugin handlers must be available in the spec under CALMJS_LOADERPLUGIN_REGISTRY
def define_magic(self, name, func):
    """[Deprecated] Expose own function as magic function for IPython.

    Binds ``func`` to the user magics object, exposes it under ``name``
    and records it as a line magic.

    Example::

        def foo_impl(self, parameter_s=''):
            'My very own magic!. (Use docstrings, IPython reads them).'
            print 'Magic function. Passed parameter is between < >:'
            print '<%s>' % parameter_s
            print 'The self object is:', self

        ip.define_magic('foo',foo_impl)
    """
    bound_method = types.MethodType(func, self.user_magics)
    setattr(self.user_magics, name, bound_method)
    record_magic(self.magics, 'line', name, bound_method)
[Deprecated] Expose own function as magic function for IPython. Example:: def foo_impl(self, parameter_s=''): 'My very own magic!. (Use docstrings, IPython reads them).' print 'Magic function. Passed parameter is between < >:' print '<%s>' % parameter_s print 'The self object is:', self ip.define_magic('foo',foo_impl)
def hicup_filtering_chart(self):
    """ Generate the HiCUP filtering plot """
    # Order of the tuples fixes the stacking order of the categories
    categories = [
        ('Valid_Pairs', '#2f7ed8', 'Valid Pairs'),
        ('Same_Fragment_Internal', '#0d233a', 'Same Fragment - Internal'),
        ('Same_Circularised', '#910000', 'Same Fragment - Circularised'),
        ('Same_Dangling_Ends', '#8bbc21', 'Same Fragment - Dangling Ends'),
        ('Re_Ligation', '#1aadce', 'Re-ligation'),
        ('Contiguous_Sequence', '#f28f43', 'Contiguous Sequence'),
        ('Wrong_Size', '#492970', 'Wrong Size'),
    ]
    keys = OrderedDict(
        (cat_id, {'color': color, 'name': label})
        for cat_id, color, label in categories
    )

    # Config for the plot
    config = {
        'id': 'hicup_filtering_plot',
        'title': 'HiCUP: Filtering Statistics',
        'ylab': '# Read Pairs',
        'cpswitch_counts_label': 'Number of Read Pairs',
        'cpswitch_c_active': False
    }

    return bargraph.plot(self.hicup_data, keys, config)
Generate the HiCUP filtering plot
def vhost_exists(name, runas=None):
    '''
    Return whether the vhost exists based on rabbitmqctl list_vhosts.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.vhost_exists rabbit_host
    '''
    # Default to the current user on POSIX systems only
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    existing_vhosts = list_vhosts(runas=runas)
    return name in existing_vhosts
Return whether the vhost exists based on rabbitmqctl list_vhosts. CLI Example: .. code-block:: bash salt '*' rabbitmq.vhost_exists rabbit_host
def validate(self):
    r"""Validate user credentials.

    Logs in to the SRP account portal with the stored username and
    password and checks that at least one account number is offered.
    Any failure (network, parsing, bad credentials) yields ``False``.

    Returns
    -------
    bool

    Examples
    --------
    Validate credentials.

    >>> from srpenergy.client import SrpEnergyClient
    >>>
    >>> accountid = 'your account id'
    >>> username = 'your username'
    >>> password = 'your password'
    >>> client = SrpEnergyClient(accountid, username, password)
    >>>
    >>> valid = client.validate()
    >>> print(valid)
    True
    """
    try:
        with requests.Session() as session:
            # Prime the session (cookies) before posting credentials
            session.get('https://www.srpnet.com/')
            login_response = session.post(
                'https://myaccount.srpnet.com/sso/login/loginuser',
                data={'UserName': self.username, 'Password': self.password}
            )
            page = BeautifulSoup(
                login_response.content.decode("utf-8"), "html.parser")
            account_select = page.find(
                'select', attrs={'name': 'accountNumber'}
            )
            account_numbers = [
                option['value']
                for option in account_select.find_all('option')
                if option['value'] != 'newAccount'
            ]
            return len(account_numbers) > 0
    except Exception:  # pylint: disable=W0703
        # Any error at all means the credentials could not be validated
        return False
r"""Validate user credentials. Returns ------- bool Examples -------- Validate credentials. >>> from srpenergy.client import SrpEnergyClient >>> >>> accountid = 'your account id' >>> username = 'your username' >>> password = 'your password' >>> client = SrpEnergyClient(accountid, username, password) >>> >>> valid = client.validate() >>> print(valid) True
def clear_bucket_props(self, bucket):
    """Clear bucket properties, resetting them to their defaults.

    Returns True on success, or False when ``pb_clear_bucket_props()``
    reports the operation cannot be performed.
    """
    if not self.pb_clear_bucket_props():
        return False
    codec = self._get_codec(riak.pb.messages.MSG_CODE_RESET_BUCKET_REQ)
    request = codec.encode_clear_bucket_props(bucket)
    self._request(request, codec)
    return True
Clear bucket properties, resetting them to their defaults
def close(self):
    """ Stops the read thread, waits for it to exit cleanly, then closes the underlying serial port """
    # Signal the rx thread's loop to stop first; joining without this
    # would block indefinitely.
    self.alive = False
    self.rxThread.join()
    # Safe to close the port only after the reader thread has exited
    self.serial.close()
Stops the read thread, waits for it to exit cleanly, then closes the underlying serial port
def fastaParseSgd(header):
    """Custom parser for fasta headers in the SGD format, see www.yeastgenome.org.

    :param header: str, protein entry header from a fasta file; expected to
        start with a systematic ID, whitespace, a gene name, and to contain
        a double-quoted description, e.g.
        ``YAL001C TFC3 SGDID:S000000001 "transcription factor"``
    :returns: dict with keys ``id``, ``name`` and ``description``
        (the description keeps its surrounding double quotes)
    :raises AttributeError: if the header does not match the expected format
        (``re.match`` returns None)
    """
    # Raw string: the original non-raw pattern relied on invalid string
    # escapes ('\S', '\"'), which raise DeprecationWarning and are a
    # SyntaxError in future Python versions.
    rePattern = r'([\S]+)\s([\S]+).+(".+")'
    ID, name, description = re.match(rePattern, header).groups()
    return {'id': ID, 'name': name, 'description': description}
Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header
def import_parms(self, args):
    """Copy every key/value pair from an external dict into the
    internal parameter store via ``set_parm``."""
    for parm_name, parm_value in args.items():
        self.set_parm(parm_name, parm_value)
Import external dict to internal dict
def as_unordered(self, inplace=False):
    """
    Set the Categorical to be unordered.

    Parameters
    ----------
    inplace : bool, default False
        Whether or not to set the ordered attribute in-place or return
        a copy of this categorical with ordered set to False.
    """
    validated_inplace = validate_bool_kwarg(inplace, 'inplace')
    return self.set_ordered(False, inplace=validated_inplace)
Set the Categorical to be unordered. Parameters ---------- inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to False.
def _cast_to_type(self, value): """ Convert the value to a float and raise error on failures""" try: return float(value) except (ValueError, TypeError): self.fail('invalid', value=value)
Convert the value to a float and raise error on failures
def resize(self, height, width, **kwargs):
    """ resize pty of an execed process """
    # Delegates to the underlying client's exec_resize with this
    # instance's exec id.  Note extra **kwargs are accepted for
    # interface compatibility but are not forwarded.
    self.client.exec_resize(self.exec_id, height=height, width=width)
resize pty of an execed process
def delete(block_id):
    """Scheduling block detail resource.

    Deletes the scheduling block instance with the given id and returns
    a (body, HTTP status) pair.
    """
    root_url = get_root_url()
    LOG.debug('Requested delete of SBI %s', block_id)
    try:
        DB.delete_sched_block_instance(block_id)
    except RuntimeError as error:
        return dict(error=str(error)), HTTPStatus.BAD_REQUEST
    response = dict(message='Deleted block: _id = {}'.format(block_id))
    response['_links'] = {
        'list': '{}/scheduling-blocks'.format(root_url)
    }
    return response, HTTPStatus.OK
Scheduling block detail resource.
def has_key(self, key):
    """Case insensitive test whether 'key' exists."""
    return self._lowerOrReturn(key) in self.data
Case insensitive test whether 'key' exists.
def isASLREnabled(self):
    """
    Determines if the current L{PE} instance has the DYNAMICBASE (Use
    address space layout randomization) flag enabled.

    @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx}

    @rtype: bool
    @return: C{True} when the DYNAMICBASE bit is set in the optional
    header's DllCharacteristics field, C{False} otherwise.
    """
    flag = consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
    characteristics = self.ntHeaders.optionalHeader.dllCharacteristics.value
    return (characteristics & flag) == flag
Determines if the current L{PE} instance has the DYNAMICBASE (Use address space layout randomization) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the DYNAMICBASE flag enabled. Otherwise, returns C{False}.
def _cluster_hits(hits, clusters, assigned_hit_array, cluster_hit_indices, column_cluster_distance, row_cluster_distance, frame_cluster_distance, min_hit_charge, max_hit_charge, ignore_same_hits, noisy_pixels, disabled_pixels):
    ''' Main precompiled function that loops over the hits and clusters them.

    hits/clusters are structured arrays of equal length; hits must provide
    at least the fields 'event_number', 'column', 'row' and 'frame'.
    assigned_hit_array is a per-hit flag array (non-zero = already handled),
    cluster_hit_indices is a scratch buffer whose size limits the maximum
    cluster size.  The *_cluster_distance values are the maximum allowed
    differences for two hits to belong to the same cluster.  Returns the
    total number of clusters written to the clusters array.
    '''
    total_hits = hits.shape[0]

    if total_hits == 0:
        return 0  # total clusters

    max_cluster_hits = cluster_hit_indices.shape[0]

    if total_hits != clusters.shape[0]:
        raise ValueError("hits and clusters must be the same size")

    if total_hits != assigned_hit_array.shape[0]:
        raise ValueError("hits and assigned_hit_array must be the same size")

    # Correction for charge weighting
    # Some chips have non-zero charge for a charge value of zero, charge needs to be corrected to calculate cluster center correctly
    if min_hit_charge == 0:
        charge_correction = 1
    else:
        charge_correction = 0

    # Temporary variables that are reset for each cluster or event
    start_event_hit_index = 0
    start_event_cluster_index = 0
    cluster_size = 0
    event_number = hits[0]['event_number']
    event_cluster_index = 0

    # Outer loop over all hits in the array (referred to as actual hit)
    for i in range(total_hits):

        # Check for new event and reset event variables
        if _new_event(hits[i]['event_number'], event_number):
            _finish_event(
                hits=hits,
                clusters=clusters,
                start_event_hit_index=start_event_hit_index,
                stop_event_hit_index=i,
                start_event_cluster_index=start_event_cluster_index,
                stop_event_cluster_index=start_event_cluster_index + event_cluster_index)

            start_event_hit_index = i
            start_event_cluster_index = start_event_cluster_index + event_cluster_index
            event_number = hits[i]['event_number']
            event_cluster_index = 0

        if assigned_hit_array[i] > 0:  # Hit was already assigned to a cluster in the inner loop, thus skip actual hit
            continue

        if not _hit_ok(
                hit=hits[i],
                min_hit_charge=min_hit_charge,
                max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[i], disabled_pixels)):
            _set_hit_invalid(hit=hits[i], cluster_id=-1)
            assigned_hit_array[i] = 1
            continue

        # Set/reset cluster variables for new cluster
        # Reset temp array with hit indices of actual cluster for the next cluster
        _set_1d_array(cluster_hit_indices, -1, cluster_size)
        cluster_hit_indices[0] = i
        assigned_hit_array[i] = 1
        cluster_size = 1  # actual cluster has one hit so far

        for j in cluster_hit_indices:  # Loop over all hits of the actual cluster; cluster_hit_indices is updated within the loop if new hit are found
            if j < 0:  # There are no more cluster hits found
                break

            # Inner loop starts after the cluster seed hit
            # (cluster_hit_indices[0] == i, see above)
            for k in range(cluster_hit_indices[0] + 1, total_hits):
                # Stop event hits loop if new event is reached
                if _new_event(hits[k]['event_number'], event_number):
                    break

                # Hit is already assigned to a cluster, thus skip actual hit
                if assigned_hit_array[k] > 0:
                    continue

                if not _hit_ok(
                        hit=hits[k],
                        min_hit_charge=min_hit_charge,
                        max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[k], disabled_pixels)):
                    _set_hit_invalid(hit=hits[k], cluster_id=-1)
                    assigned_hit_array[k] = 1
                    continue

                # Check if event hit belongs to actual hit and thus to the actual cluster
                if _is_in_max_difference(hits[j]['column'], hits[k]['column'], column_cluster_distance) and _is_in_max_difference(hits[j]['row'], hits[k]['row'], row_cluster_distance) and _is_in_max_difference(hits[j]['frame'], hits[k]['frame'], frame_cluster_distance):
                    if not ignore_same_hits or hits[j]['column'] != hits[k]['column'] or hits[j]['row'] != hits[k]['row']:
                        cluster_size += 1
                        if cluster_size > max_cluster_hits:
                            raise IndexError('cluster_hit_indices is too small to contain all cluster hits')
                        cluster_hit_indices[cluster_size - 1] = k
                        assigned_hit_array[k] = 1
                    else:
                        # Duplicate column/row hit with ignore_same_hits set:
                        # mark invalid instead of growing the cluster
                        _set_hit_invalid(hit=hits[k], cluster_id=-2)
                        assigned_hit_array[k] = 1

        # check for valid cluster and add it to the array
        # (single-hit clusters on noisy pixels are discarded)
        if cluster_size == 1 and noisy_pixels.shape[0] != 0 and _pixel_masked(hits[cluster_hit_indices[0]], noisy_pixels):
            _set_hit_invalid(hit=hits[cluster_hit_indices[0]], cluster_id=-1)
        else:
            _finish_cluster(
                hits=hits,
                clusters=clusters,
                cluster_size=cluster_size,
                cluster_hit_indices=cluster_hit_indices,
                cluster_index=start_event_cluster_index + event_cluster_index,
                cluster_id=event_cluster_index,
                charge_correction=charge_correction,
                noisy_pixels=noisy_pixels,
                disabled_pixels=disabled_pixels)
            event_cluster_index += 1

    # Last event is assumed to be finished at the end of the hit array, thus add info
    _finish_event(
        hits=hits,
        clusters=clusters,
        start_event_hit_index=start_event_hit_index,
        stop_event_hit_index=total_hits,
        start_event_cluster_index=start_event_cluster_index,
        stop_event_cluster_index=start_event_cluster_index + event_cluster_index)

    total_clusters = start_event_cluster_index + event_cluster_index
    return total_clusters
Main precompiled function that loops over the hits and clusters them
def phase_by_transmission(g, window_size, copy=True):
    """Phase genotypes in a trio or cross where possible using
    Mendelian transmission.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, 2)
        Genotype array, with parents as first two columns and progeny
        as remaining columns.
    window_size : int
        Number of previous heterozygous sites to include when phasing
        each parent. A number somewhere between 10 and 100 may be
        appropriate, depending on levels of heterozygosity and quality
        of data.
    copy : bool, optional
        If False, attempt to phase genotypes in-place. Note that this
        is only possible if the input array has int8 dtype, otherwise a
        copy is always made regardless of this parameter.

    Returns
    -------
    g : GenotypeArray
        Genotype array with progeny phased where possible.

    """
    # setup and input validation
    raw = np.asarray(g, dtype='i1')
    g = GenotypeArray(raw, copy=copy)
    g._values = memoryview_safe(g.values)
    check_ploidy(g.ploidy, 2)
    check_min_samples(g.n_samples, 3)

    # phase the progeny first ...
    is_phased = _opt_phase_progeny_by_transmission(g.values)
    g.is_phased = np.asarray(is_phased).view(bool)

    # ... then use the phased progeny to phase the parents
    _opt_phase_parents_by_transmission(g.values, is_phased, window_size)

    return g
Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible.
def _bld_pab_generic(self, funcname, **kwargs):
    """Implement a generic version of an attribute-based pandas
    function by storing its dispatch record (mtype + kwargs) on self
    under ``funcname``."""
    setattr(self, funcname, {'mtype': pab, 'kwargs': kwargs})
implements a generic version of an attribute based pandas function
def apply_noise(data, noise):
    """
    Applies noise to a sparse matrix.  Noise can be an integer between 0 and
    100, indicating the percentage of ones in the original input to move, or
    a float in [0, 1), indicating the same thing.

    The input matrix is modified in-place, and nothing is returned.

    This operation does not affect the sparsity of the matrix, or of any
    individual datapoint.
    """
    # Normalize a percentage (e.g. 25) to a fraction (0.25)
    if noise >= 1:
        noise = noise / 100.

    for row in range(data.nRows()):
        one_positions = data.rowNonZeros(row)[0]
        n_moves = int(len(one_positions) * noise)
        # Clear a random subset of the active bits ...
        for col in numpy.random.choice(one_positions, size=n_moves,
                                       replace=False):
            data[row, col] = 0
        # ... and activate the same number of bits elsewhere, re-rolling
        # any candidate position that is already set
        for col in numpy.random.choice(data.nCols(), size=n_moves,
                                       replace=False):
            while data[row, col] == 1:
                col = numpy.random.randint(0, data.nCols())
            data[row, col] = 1
Applies noise to a sparse matrix. Noise can be an integer between 0 and 100, indicating the percentage of ones in the original input to move, or a float in [0, 1), indicating the same thing. The input matrix is modified in-place, and nothing is returned. This operation does not affect the sparsity of the matrix, or of any individual datapoint.
def groups_dynamic(self):
    """Return a dynamic relationship to Group (via the users_groups
    secondary table), allowing further query filtering before load."""
    relationship_options = dict(
        secondary="users_groups",
        lazy="dynamic",
        passive_deletes=True,
        passive_updates=True,
    )
    return sa.orm.relationship("Group", **relationship_options)
returns dynamic relationship for groups - allowing for filtering of data
def _unpack_tableswitch(bc, offset):
    """Unpack the tableswitch op arguments.

    Returns ((default, low, high, jump_offsets), new_offset).
    """
    # skip the padding bytes up to the next 4-byte boundary
    misalignment = offset % 4
    if misalignment:
        offset += 4 - misalignment

    (default, low, high), offset = _unpack(_struct_iii, bc, offset)

    # one 32-bit jump offset per value in the [low, high] range
    jump_offsets = []
    for _ in range((high - low) + 1):
        jump, offset = _unpack(_struct_i, bc, offset)
        jump_offsets.append(jump)

    return (default, low, high, jump_offsets), offset
function for unpacking the tableswitch op arguments
def __search_iterable(self,
                      obj,
                      item,
                      parent="root",
                      parents_ids=frozenset({})):
    """Search iterables except dictionaries, sets and strings.

    Each matching element is reported under the path ``parent[i]``;
    non-matching elements are recursed into unless they were already
    visited (tracked via ``parents_ids`` to avoid reference cycles).
    """
    for i, thing in enumerate(obj):
        new_parent = "%s[%s]" % (parent, i)
        if self.__skip_this(thing, parent=new_parent):
            continue

        # Compare case-insensitively for strings unless configured otherwise
        if self.case_sensitive or not isinstance(thing, strings):
            thing_cased = thing
        else:
            thing_cased = thing.lower()

        if thing_cased == item:
            self.__report(
                report_key='matched_values', key=new_parent, value=thing)
        else:
            item_id = id(thing)
            if parents_ids and item_id in parents_ids:
                continue
            parents_ids_added = add_to_frozen_set(parents_ids, item_id)
            # Reuse new_parent instead of re-formatting the identical
            # "%s[%s]" % (parent, i) path string a second time.
            self.__search(thing, item, new_parent, parents_ids_added)
Search iterables except dictionaries, sets and strings.
def identify_sep(filepath):
    """
    Identifies the separator of data in a filepath.
    It reads the first line of the file and counts supported separators.

    Currently supported separators: ['|', ';', ',', '\\t', ':']

    :param filepath: path to a delimited text file (.csv, .txt or .tsv)
    :returns: the separator character occurring most often in the header
    :raises AssertionError: if the file extension is not supported
    :raises Exception: if no supported separator occurs in the header
    """
    ext = os.path.splitext(filepath)[1].lower()
    allowed_exts = ['.csv', '.txt', '.tsv']
    # Bug fix: the check previously tested against ['.csv', '.txt'] only,
    # rejecting .tsv files even though allowed_exts lists them as supported.
    assert ext in allowed_exts, "Unexpected file extension {}. \
    Supported extensions {}\n filename: {}".format(
        ext, allowed_exts, os.path.basename(filepath))
    maybe_seps = ['|', ';', ',', '\t', ':']
    with open(filepath, 'r') as fp:
        header = next(fp)
    # Keep only separators that actually occur in the header
    counts = {sep: _count(sep, header) for sep in maybe_seps}
    counts = {sep: count for sep, count in counts.items() if count > 0}
    if not counts:
        raise Exception("Couldn't identify the sep from the header... here's the information:\n HEADER: {}\n SEPS SEARCHED: {}".format(header, maybe_seps))
    return max(counts, key=counts.get)
Identifies the separator of data in a filepath. It reads the first line of the file and counts supported separators. Currently supported separators: ['|', ';', ',','\t',':']
def Nu_x(self, L, theta, Ts, **statef):
    """
    Calculate the local Nusselt number.

    :param L: [m] characteristic length of the heat transfer surface
    :param theta: [°] angle of the surface with the vertical
    :param Ts: [K] heat transfer surface temperature
    :param Tf: [K] bulk fluid temperature (passed via ``statef['T']``)

    :returns: float
    """
    Tf = statef['T']
    thetar = radians(theta)
    # Tr appears to be a reference/film temperature for property
    # evaluation, with different weighting for gases and liquids
    # -- TODO confirm against the correlation source.
    if self._isgas:
        self.Tr = Ts - 0.38 * (Ts - Tf)
        beta = self._fluid.beta(T=Tf)
    else:  # for liquids
        self.Tr = Ts - 0.5 * (Ts - Tf)
        beta = self._fluid.beta(T=self.Tr)
    # For slightly inclined surfaces (within 45° of vertical on the
    # relevant side), use the gravity component along the surface.
    if Ts > Tf:  # hot surface
        if 0.0 < theta < 45.0:
            g = const.g*cos(thetar)
        else:
            g = const.g
    else:  # cold surface
        if -45.0 < theta < 0.0:
            g = const.g*cos(thetar)
        else:
            g = const.g
    # Fluid properties evaluated at the reference temperature
    nu = self._fluid.nu(T=self.Tr)
    alpha = self._fluid.alpha(T=self.Tr)
    Gr = dq.Gr(L, Ts, Tf, beta, nu, g)
    Pr = dq.Pr(nu, alpha)
    Ra = Gr * Pr
    # Pick the first region whose (theta, Ra) domain contains this case
    # and use its associated correlation equation.
    eq = [self.equation_dict[r] for r in self.regions if r.contains_point(theta, Ra)][0]
    return eq(self, Ra, Pr)
Calculate the local Nusselt number. :param L: [m] characteristic length of the heat transfer surface :param theta: [°] angle of the surface with the vertical :param Ts: [K] heat transfer surface temperature :param Tf: [K] bulk fluid temperature :returns: float
def get_freq_map_and_normalizations(self, frequency_list,
                                    upper_freq_formula):
    """Store the index map and normalization factors for --vary-fupper.

    Records the mapping between each frequency in the list and its
    sorted-order index, and precomputes the normalization factor
    (sqrt of the I7 moment) at every frequency, used when estimating
    overlaps to account for abrupt changes in termination frequency.

    Parameters
    -----------
    frequency_list : array of floats
        The frequencies for which the metric has been computed and lie
        within the parameter space being considered.
    upper_freq_formula : string
    """
    self.upper_freq_formula = upper_freq_formula
    # FIXME: Must this be sorted on input
    frequency_list.sort()
    self.frequency_map = {
        freq: pos for pos, freq in enumerate(frequency_list)
    }
    self.normalization_map = {
        freq: self.metric_params.moments['I7'][freq] ** 0.5
        for freq in frequency_list
    }
If using the --vary-fupper capability we need to store the mapping between index and frequencies in the list. We also precalculate the normalization factor at every frequency, which is used when estimating overlaps to account for abrupt changes in termination frequency. Parameters ----------- frequency_list : array of floats The frequencies for which the metric has been computed and lie within the parameter space being considered. upper_freq_formula : string
def _oauth_request_parameters(self, url, access_token, parameters={},
                              method="GET"):
    """Returns the OAuth parameters as a dict for the given request.

    parameters should include all POST arguments and query string arguments
    that will be sent with the request.
    """
    consumer_token = self._oauth_consumer_token()
    oauth_version = getattr(self, "_OAUTH_VERSION", "1.0a")
    base_args = dict(
        oauth_consumer_key=consumer_token["key"],
        oauth_token=access_token["key"],
        oauth_signature_method="HMAC-SHA1",
        oauth_timestamp=str(int(time.time())),
        oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
        oauth_version=oauth_version,
    )
    # The signature covers the oauth params plus all request parameters,
    # but only the oauth params (with the signature) are returned.
    signing_args = {}
    signing_args.update(base_args)
    signing_args.update(parameters)
    if oauth_version == "1.0a":
        sign = _oauth10a_signature
    else:
        sign = _oauth_signature
    base_args["oauth_signature"] = sign(
        consumer_token, method, url, signing_args, access_token)
    return base_args
Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments that will be sent with the request.
def unicode_to_bytes(s, encoding='utf-8', errors='replace'):
    """
    Helper to convert unicode strings to bytes for data that needs to
    be written to an output stream (i.e. terminal)

    For Python 3 this should be called str_to_bytes.  NOTE(review):
    under Python 3 a ``str`` input is returned unchanged and only
    non-``str`` values are encoded, reflecting the original Python 2
    semantics ("pass bytes through, encode unicode").

    :param str s: string to encode
    :param str encoding: utf-8 by default
    :param str errors: what to do when encoding fails
    :return: the input unchanged if it is a ``str``, otherwise the
        result of ``s.encode(encoding, errors)``
    """
    if isinstance(s, str):
        return s
    return s.encode(encoding, errors)
Helper to convert unicode strings to bytes for data that needs to be written to on output stream (i.e. terminal) For Python 3 this should be called str_to_bytes :param str s: string to encode :param str encoding: utf-8 by default :param str errors: what to do when encoding fails :return: byte string utf-8 encoded