Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
371,100
def get_results(cmd): try: return subprocess.check_output(cmd) except subprocess.CalledProcessError as e: return e.output
def get_results(cmd: list) -> str: return lines Get the ping results using fping. :param cmd: List - the fping command and its options :return: String - raw string output containing csv fping results including the newline characters
371,101
def download(self, filename=None): tmp_file, f_suffix = download_file(self.url) if not filename is None: shutil.move(tmp_file, filename) return filename else: return tmp_file
Download an attachment. The files are currently not cached since they can be overwritten on the server. Parameters ---------- filename : string, optional Optional name for the file on local disk. Returns ------- string Path to downloaded temporary file on disk
371,102
def render_revalidation_failure(self, failed_step, form, **kwargs): self.storage.current_step = failed_step return redirect(self.url_name, step=failed_step)
When a step fails, we have to redirect the user to the first failing step.
371,103
def __start_commoncrawl_extractor(warc_download_url, callback_on_article_extracted=None, valid_hosts=None, start_date=None, end_date=None, strict_date=True, reuse_previously_downloaded_files=True, local_download_dir_warc=None, continue_after_error=True, show_download_progress=False, log_level=logging.ERROR, delete_warc_after_extraction=True, continue_process=True, log_pathname_fully_extracted_warcs=None): commoncrawl_extractor = CommonCrawlExtractor() commoncrawl_extractor.extract_from_commoncrawl(warc_download_url, callback_on_article_extracted, valid_hosts=valid_hosts, start_date=start_date, end_date=end_date, strict_date=strict_date, reuse_previously_downloaded_files=reuse_previously_downloaded_files, local_download_dir_warc=local_download_dir_warc, continue_after_error=continue_after_error, show_download_progress=show_download_progress, log_level=log_level, delete_warc_after_extraction=delete_warc_after_extraction, log_pathname_fully_extracted_warcs=__log_pathname_fully_extracted_warcs)
Starts a single CommonCrawlExtractor :param warc_download_url: :param callback_on_article_extracted: :param valid_hosts: :param start_date: :param end_date: :param strict_date: :param reuse_previously_downloaded_files: :param local_download_dir_warc: :param continue_after_error: :param show_download_progress: :param log_level: :return:
371,104
def enable_disable(self): if self.enabled: self.data[] = False else: self.data[] = True self.update()
Enable or disable this endpoint. If enabled, it will be disabled and vice versa. :return: None
371,105
def loadSignal(self, name, start=None, end=None): entry = self._getCacheEntry(name) if entry is not None: from analyser.common.signal import loadSignalFromWav return loadSignalFromWav(entry[], start=start, end=end) else: return None
Loads the named entry from the upload cache as a signal. :param name: the name. :param start: the time to start from in HH:mm:ss.SSS format :param end: the time to end at in HH:mm:ss.SSS format. :return: the signal if the named upload exists.
371,106
def generate_key_pair(size=2048, public_exponent=65537, as_string=True): private = rsa.generate_private_key( public_exponent=public_exponent, key_size=size, backend=default_backend() ) public = private.public_key() if not as_string: return private, public pem_private = private.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()).decode(ENCODING) pem_public = public.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode(ENCODING) return pem_private, pem_public
Generate a public/private key pair. :param size: Optional. Describes how many bits long the key should be, larger keys provide more security, currently 1024 and below are considered breakable, and 2048 or 4096 are reasonable default key sizes for new keys. Defaults to 2048. :param public_exponent: Optional. Indicates what one mathematical property of the key generation will be. 65537 is the default and should almost always be used. :param as_string: Optional. If True, return tuple of strings. If false, return tuple of RSA key objects. Defaults to True. :return: (PrivateKey<string>, PublicKey<string>) :return: ( `RSAPrivateKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey>`_, `RSAPublicKey <https://cryptography.io/en/latest/hazmat/primitives/asymmetric/rsa/#cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey>`_)
371,107
def compute_curl(self, vector_field): A = 0.5 * numpy.sum(vector_field[self.idx_hierarchy], axis=0) sum_edge_dot_A = numpy.einsum("ijk, ijk->j", self.half_edge_coords, A) z = numpy.cross(self.half_edge_coords[0], self.half_edge_coords[1]) curl = z * (0.5 * sum_edge_dot_A / self.cell_volumes ** 2)[..., None] return curl
Computes the curl of a vector field over the mesh. While the vector field is point-based, the curl will be cell-based. The approximation is based on .. math:: n\\cdot curl(F) = \\lim_{A\\to 0} |A|^{-1} <\\int_{dGamma}, F> dr; see <https://en.wikipedia.org/wiki/Curl_(mathematics)>. Actually, to approximate the integral, one would only need the projection of the vector field onto the edges at the midpoint of the edges.
371,108
def __add_annotation_tier(self, docgraph, body, annotation_layer): layer_cat = annotation_layer.split()[-1] temp_tier = self.E(, {: "TIE{}".format(self.tier_count), : layer_cat, : "t", : "[{}]".format(annotation_layer)}) self.tier_count += 1 for node_id in select_nodes_by_layer(docgraph, annotation_layer): span_node_ids = get_span(docgraph, node_id) if span_node_ids: start_id, end_id = self.__span2event(span_node_ids) event_label = docgraph.node[node_id].get(, ) event = self.E(, {: "T{}".format(start_id), : "T{}".format(end_id)}, event_label) temp_tier.append(event) body.append(temp_tier)
adds a span-based annotation layer as a <tier> to the Exmaralda <body>. Parameter --------- docgraph : DiscourseDocumentGraph the document graph from which the chains will be extracted body : etree._Element an etree representation of the <basic_body> element (and all its descendants) of the Exmaralda file annotation_layer : str the name of a layer, e.g. 'tiger', 'tiger:token' or 'mmax:sentence'
371,109
def pulse_magnitude(time, magnitude, start, repeat_time=0): t = time() small = 1e-6 if repeat_time <= small: if abs(t - start) < time.step(): return magnitude * time.step() else: return 0 else: if abs((t - start) % repeat_time) < time.step(): return magnitude * time.step() else: return 0
Implements xmile's PULSE function PULSE: Generate a one-DT wide pulse at the given time Parameters: 2 or 3: (magnitude, first time[, interval]) Without interval or when interval = 0, the PULSE is generated only once Example: PULSE(20, 12, 5) generates a pulse value of 20/DT at time 12, 17, 22, etc. In rage [-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + dt) return magnitude/dt In rage [start + n * repeat_time + dt, start + (n + 1) * repeat_time) return 0
371,110
def interactive(proto_dataset_uri): proto_dataset = dtoolcore.ProtoDataSet.from_uri( uri=proto_dataset_uri, config_path=CONFIG_PATH) readme_template = _get_readme_template() yaml = YAML() yaml.explicit_start = True yaml.indent(mapping=2, sequence=4, offset=2) descriptive_metadata = yaml.load(readme_template) descriptive_metadata = _prompt_for_values(descriptive_metadata) stream = StringIO() yaml.dump(descriptive_metadata, stream) proto_dataset.put_readme(stream.getvalue()) click.secho("Updated readme ", fg="green") click.secho("To edit the readme using your default editor:") click.secho( "dtool readme edit {}".format(proto_dataset_uri), fg="cyan")
Interactive prompting to populate the readme.
371,111
def _process_infohash_list(infohash_list): if isinstance(infohash_list, list): data = {: .join([h.lower() for h in infohash_list])} else: data = {: infohash_list.lower()} return data
Method to convert the infohash_list to qBittorrent API friendly values. :param infohash_list: List of infohash.
371,112
def status(self): return self.repository._repo.status(self._ctx.p1().node(), self._ctx.node())
Returns modified, added, removed, deleted files for current changeset
371,113
def get_longest_target_alignment_coords_by_name(self,name): longest = -1 coord = None for line in [self._lines[x] for x in self._name_to_num[name]]: if line[] & 2304 == 0: return [line[],line[]] return None sys.stderr.write("ERROR: no primary alignment set in index\n") sys.exit()
For a name get the best alignment :return: [filebyte,innerbyte] describing the to distance the zipped block start, and the distance within the unzipped block :rtype: list
371,114
def resources(self): used_resources = self._used_resources() ret = collections.defaultdict(dict) for resource, total in six.iteritems(self._resources): ret[resource][] = total if resource in used_resources: ret[resource][] = used_resources[resource] else: ret[resource][] = 0 return ret
get total resources and available ones
371,115
def angular_distance(first, second, bidirectional=True): lon1, lat1 = first lon2, lat2 = second lon1, lat1, lon2, lat2 = np.atleast_1d(lon1, lat1, lon2, lat2) xyz1 = sph2cart(lon1, lat1) xyz2 = sph2cart(lon2, lat2) dot = np.einsum(, xyz1, xyz2) angle = np.arccos(dot) if np.any(np.isnan(angle)): rtol = 1e-4 angle[np.isclose(dot, -1, rtol)] = np.pi angle[np.isclose(dot, 1, rtol)] = 0 if bidirectional: mask = angle > np.pi / 2 angle[mask] = np.pi - angle[mask] return angle
Calculate the angular distance between two linear features or elementwise angular distance between two sets of linear features. (Note: a linear feature in this context is a point on a stereonet represented by a single latitude and longitude.) Parameters ---------- first : (lon, lat) 2xN array-like or sequence of two numbers The longitudes and latitudes of the first measurements in radians. second : (lon, lat) 2xN array-like or sequence of two numbers The longitudes and latitudes of the second measurements in radians. bidirectional : boolean If True, only "inner" angles will be returned. In other words, all angles returned by this function will be in the range [0, pi/2] (0 to 90 in degrees). Otherwise, ``first`` and ``second`` will be treated as vectors going from the origin outwards instead of bidirectional infinite lines. Therefore, with ``bidirectional=False``, angles returned by this function will be in the range [0, pi] (zero to 180 degrees). Returns ------- dist : array The elementwise angular distance between each pair of measurements in (lon1, lat1) and (lon2, lat2). Examples -------- Calculate the angle between two lines specified as a plunge/bearing >>> angle = angular_distance(line(30, 270), line(40, 90)) >>> np.degrees(angle) array([ 70.]) Let's do the same, but change the "bidirectional" argument: >>> first, second = line(30, 270), line(40, 90) >>> angle = angular_distance(first, second, bidirectional=False) >>> np.degrees(angle) array([ 110.]) Calculate the angle between two planes. >>> angle = angular_distance(pole(0, 10), pole(180, 10)) >>> np.degrees(angle) array([ 20.])
371,116
def save(self, path_or_file, strict=True, fmt=): self.validate(strict=strict) with _open(path_or_file, mode=, fmt=fmt) as fdesc: json.dump(self.__json__, fdesc, indent=2)
Serialize annotation as a JSON formatted stream to file. Parameters ---------- path_or_file : str or file-like Path to save the JAMS object on disk OR An open file descriptor to write into strict : bool Force strict schema validation fmt : str ['auto', 'jams', 'jamz'] The output encoding format. If `auto`, it is inferred from the file name. If the input is an open file handle, `jams` encoding is used. Raises ------ SchemaError If `strict == True` and the JAMS object fails schema or namespace validation. See also -------- validate
371,117
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None, allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None, allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None, mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None, multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None, dropFieldIfAllNull=None, encoding=None, locale=None): self._set_opts( schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal, allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames, allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero, allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter, mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat, timestampFormat=timestampFormat, multiLine=multiLine, allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep, samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding, locale=locale) if isinstance(path, basestring): path = [path] if type(path) == list: return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path))) elif isinstance(path, RDD): def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = path.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString()) return self._df(self._jreader.json(jrdd)) else: raise TypeError("path can be only string, list or RDD")
Loads JSON files and returns the results as a :class:`DataFrame`. `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default. For JSON (one record per file), set the ``multiLine`` parameter to ``true``. If the ``schema`` parameter is not specified, this function goes through the input once to determine the input schema. :param path: string represents path to the JSON dataset, or a list of paths, or RDD of Strings storing JSON objects. :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``). :param primitivesAsString: infers all primitive values as a string type. If None is set, it uses the default value, ``false``. :param prefersDecimal: infers all floating-point values as a decimal type. If the values do not fit in decimal, then it infers them as doubles. If None is set, it uses the default value, ``false``. :param allowComments: ignores Java/C++ style comment in JSON records. If None is set, it uses the default value, ``false``. :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set, it uses the default value, ``false``. :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is set, it uses the default value, ``true``. :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is set, it uses the default value, ``false``. :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character using backslash quoting mechanism. If None is set, it uses the default value, ``false``. :param mode: allows a mode for dealing with corrupt records during parsing. If None is set, it uses the default value, ``PERMISSIVE``. * ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \ into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \ fields to ``null``. 
To keep corrupt records, an user can set a string type \ field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \ schema does not have the field, it drops corrupt records during parsing. \ When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \ field in an output schema. * ``DROPMALFORMED`` : ignores the whole corrupted records. * ``FAILFAST`` : throws an exception when it meets corrupted records. :param columnNameOfCorruptRecord: allows renaming the new field having malformed string created by ``PERMISSIVE`` mode. This overrides ``spark.sql.columnNameOfCorruptRecord``. If None is set, it uses the value specified in ``spark.sql.columnNameOfCorruptRecord``. :param dateFormat: sets the string that indicates a date format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to date type. If None is set, it uses the default value, ``yyyy-MM-dd``. :param timestampFormat: sets the string that indicates a timestamp format. Custom date formats follow the formats at ``java.time.format.DateTimeFormatter``. This applies to timestamp type. If None is set, it uses the default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``. :param multiLine: parse one record, which may span multiple lines, per file. If None is set, it uses the default value, ``false``. :param allowUnquotedControlChars: allows JSON Strings to contain unquoted control characters (ASCII characters with value less than 32, including tab and line feed characters) or not. :param encoding: allows to forcibly set one of standard basic or extended encoding for the JSON files. For example UTF-16BE, UTF-32LE. If None is set, the encoding of input JSON will be detected automatically when the multiLine option is set to ``true``. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. 
:param samplingRatio: defines fraction of input JSON objects used for schema inferring. If None is set, it uses the default value, ``1.0``. :param dropFieldIfAllNull: whether to ignore column of all null values or empty array/struct during schema inference. If None is set, it uses the default value, ``false``. :param locale: sets a locale as language tag in IETF BCP 47 format. If None is set, it uses the default value, ``en-US``. For instance, ``locale`` is used while parsing dates and timestamps. >>> df1 = spark.read.json('python/test_support/sql/people.json') >>> df1.dtypes [('age', 'bigint'), ('name', 'string')] >>> rdd = sc.textFile('python/test_support/sql/people.json') >>> df2 = spark.read.json(rdd) >>> df2.dtypes [('age', 'bigint'), ('name', 'string')]
371,118
def to_dict(self): json_dict = {"apiKey": self.api_key, "userArn": self.user_arn, "cognitoAuthenticationType": self.cognito_authentication_type, "caller": self.caller, "userAgent": self.user_agent, "user": self.user, "cognitoIdentityPoolId": self.cognito_identity_pool_id, "cognitoAuthenticationProvider": self.cognito_authentication_provider, "sourceIp": self.source_ip, "accountId": self.account_id } return json_dict
Constructs an dictionary representation of the Identity Object to be used in serializing to JSON :return: dict representing the object
371,119
def button_change_send(self, time_boot_ms, last_change_ms, state, force_mavlink1=False): return self.send(self.button_change_encode(time_boot_ms, last_change_ms, state), force_mavlink1=force_mavlink1)
Report button state change time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) last_change_ms : Time of last change of button state (uint32_t) state : Bitmap state of buttons (uint8_t)
371,120
def isbase(path1, path2): _path1 = forcedir(abspath(path1)) _path2 = forcedir(abspath(path2)) return _path2.startswith(_path1)
Check if ``path1`` is a base of ``path2``. Arguments: path1 (str): A PyFilesytem path. path2 (str): A PyFilesytem path. Returns: bool: `True` if ``path2`` starts with ``path1`` Example: >>> isbase('foo/bar', 'foo/bar/baz/egg.txt') True
371,121
def multicore(function, cores, multiargs, **singleargs): tblib.pickling_support.install() if sys.version_info >= (3, 0): check = inspect.getfullargspec(function) varkw = check.varkw else: check = inspect.getargspec(function) varkw = check.keywords if not check.varargs and not varkw: multiargs_check = [x for x in multiargs if x not in check.args] singleargs_check = [x for x in singleargs if x not in check.args] if len(multiargs_check) > 0: raise AttributeError(.format(.join(multiargs_check))) if len(singleargs_check) > 0: raise AttributeError(.format(.join(singleargs_check))) arglengths = list(set([len(multiargs[x]) for x in multiargs])) if len(arglengths) > 1: raise AttributeError() cores = cores if arglengths[0] >= cores else arglengths[0] processlist = [dictmerge(dict([(arg, multiargs[arg][i]) for arg in multiargs]), singleargs) for i in range(len(multiargs[list(multiargs.keys())[0]]))] if platform.system() == : script = os.path.join(os.path.dirname(os.path.realpath(__file__)), ) tmpfile = os.path.join(tempfile.gettempdir(), ) if not dill.pickles([function, cores, processlist]): raise RuntimeError( ) with open(tmpfile, ) as tmp: dill.dump([function, cores, processlist], tmp, byref=False) proc = sp.Popen([sys.executable, script], stdin=sp.PIPE, stderr=sp.PIPE) out, err = proc.communicate() if proc.returncode != 0: raise RuntimeError(err.decode()) with open(tmpfile, ) as tmp: result = dill.load(tmp) return result else: results = None def wrapper(**kwargs): try: return function(**kwargs) except Exception as e: return ExceptionWrapper(e) with HiddenPrints(): try: pool = mp.Pool(processes=cores) except NameError: raise ImportError("package could not be imported") results = pool.imap(lambda x: wrapper(**x), processlist) pool.close() pool.join() i = 0 out = [] for item in results: if isinstance(item, ExceptionWrapper): item.ee = type(item.ee)(str(item.ee) + "\n(called function with args {})" .format(function.__name__, processlist[i])) raise (item.re_raise()) 
out.append(item) i += 1 eval = [x for x in out if x is not None] if len(eval) == 0: return None else: return out
wrapper for multicore process execution Parameters ---------- function individual function to be applied to each process item cores: int the number of subprocesses started/CPUs used; this value is reduced in case the number of subprocesses is smaller multiargs: dict a dictionary containing sub-function argument names as keys and lists of arguments to be distributed among the processes as values singleargs all remaining arguments which are invariant among the subprocesses Returns ------- None or list the return of the function for all subprocesses Notes ----- - all `multiargs` value lists must be of same length, i.e. all argument keys must be explicitly defined for each subprocess - all function arguments passed via `singleargs` must be provided with the full argument name and its value (i.e. argname=argval); default function args are not accepted - if the processes return anything else than None, this function will return a list of results - if all processes return None, this function will be of type void Examples -------- >>> def add(x, y, z): >>> return x + y + z >>> multicore(add, cores=2, multiargs={'x': [1, 2]}, y=5, z=9) [15, 16] >>> multicore(add, cores=2, multiargs={'x': [1, 2], 'y': [5, 6]}, z=9) [15, 17] See Also -------- :mod:`pathos.multiprocessing`
371,122
def __check_label_image(label_image): encountered_indices = scipy.unique(label_image) expected_indices = scipy.arange(1, label_image.max() + 1) if not encountered_indices.size == expected_indices.size or \ not (encountered_indices == expected_indices).all(): raise AttributeError()
Check the label image for consistent labelling starting from 1.
371,123
def create_job(self, builder_job, wf_job=None, is_output=False ): copied = copy.deepcopy(builder_job) relativised_input_objecttemp = {} self._relativise_files(copied) def jdefault(o): return dict(o) if is_output: rel_path = posixpath.join(_posix_path(WORKFLOW), "primary-output.json") else: rel_path = posixpath.join(_posix_path(WORKFLOW), "primary-job.json") j = json_dumps(copied, indent=4, ensure_ascii=False, default=jdefault) with self.write_bag_file(rel_path) as file_path: file_path.write(j + u"\n") _logger.debug(u"[provenance] Generated customised job file: %s", rel_path) relativised_input_objecttemp = {} for key, value in copied.items(): if isinstance(value, MutableMapping): if value.get("class") in ("File", "Directory"): relativised_input_objecttemp[key] = value else: relativised_input_objecttemp[key] = value self.relativised_input_object.update( {k: v for k, v in relativised_input_objecttemp.items() if v}) return self.relativised_input_object
Generate the new job object with RO specific relative paths.
371,124
def parse_url_or_log(url, encoding=): try: url_info = URLInfo.parse(url, encoding=encoding) except ValueError as error: _logger.warning(__( _(), url=wpull.string.printable_str(url), error=error)) else: return url_info
Parse and return a URLInfo. This function logs a warning if the URL cannot be parsed and returns None.
371,125
def queryset(self, request, queryset): if self.value(): try: section = SectionPage.objects.get(id=self.value()) return queryset.child_of(section).all() except (ObjectDoesNotExist, MultipleObjectsReturned): return None
Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.
371,126
def __make_request(self, url, method, data, auth, cookies, headers, proxies, timeout, verify): request_by_method = getattr(requests, method) return request_by_method( url=url, data=data, auth=auth, cookies=cookies, headers=headers, proxies=proxies, timeout=timeout, verify=verify, allow_redirects=True, stream=False )
Execute a request with the given data. Args: url (str): The URL to call. method (str): The method (e.g. `get` or `post`). data (str): The data to call the URL with. auth (obj): The authentication class. cookies (obj): The cookie dict. headers (obj): The header dict. proxies (obj): The proxies dict. timeout (int): The request timeout in seconds. verify (mixed): SSL verification. Returns: obj: The response object.
371,127
def solar_irradiation(latitude, longitude, Z, moment, surface_tilt, surface_azimuth, T=None, P=None, solar_constant=1366.1, atmos_refract=0.5667, albedo=0.25, linke_turbidity=None, extraradiation_method=, airmass_model=, cache=None): rs disk (at a standardized distance of 1 AU); this constant is independent of activity or conditions on earth, but will vary throughout the suns position on the amount of radiation which reaches earth according to the methods available in the `pvlib` library, [-] airmass_model : str, optional The specified method to calculate the amount of air the sunlight needs to travel through to reach the earth according to the methods available in the `pvlib` library, [-] cache : dict, optional Dictionary to to check for values to use to skip some calculations; `apparent_zenith`, `zenith`, `azimuth` supported, [-] Returns ------- poa_global : float The total irradiance in the plane of the surface, [W/m^2] poa_direct : float The total beam irradiance in the plane of the surface, [W/m^2] poa_diffuse : float The total diffuse irradiance in the plane of the surface, [W/m^2] poa_sky_diffuse : float The sky component of the diffuse irradiance, excluding the impact from the ground, [W/m^2] poa_ground_diffuse : float The ground-sky diffuse irradiance component, [W/m^2] Examples -------- >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0, ... surface_azimuth=180.0) (1065.7621896280812, 945.2656564506323, 120.49653317744884, 95.31535344213178, 25.181179735317063) >>> cache = {: 41.099082295767545, : 41.11285376417578, : 182.5631874250523} >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0, ... linke_turbidity=3, T=300, P=1E5, ... 
surface_azimuth=180.0, cache=cache) (1042.5677703677097, 918.2377548545295, 124.33001551318027, 99.6228657378363, 24.70714977534396) At night, there is no solar radiation and this function returns zeros: >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 2, 43, 5), surface_tilt=41.0, ... surface_azimuth=180.0) (0.0, -0.0, 0.0, 0.0, 0.0) Notes ----- The retrieval of `linke_turbidity` requires the pytables library (and Pandas); if it is not installed, specify a value of `linke_turbidity` to avoid the dependency. There is some redundancy of the calculated results, according to the following relations. The total irradiance is normally that desired for engineering calculations. poa_diffuse = poa_ground_diffuse + poa_sky_diffuse poa_global = poa_direct + poa_diffuse FOr a surface such as a pipe or vessel, an approach would be to split it into a number of rectangles and sum up the radiation absorbed by each. This calculation is fairly slow. References ---------- .. [1] Will Holmgren, Calama-Consulting, Tony Lorenzo, Uwe Krien, bmu, DaCoEx, mayudong, et al. Pvlib/Pvlib-Python: 0.5.1. Zenodo, 2017. https://doi.org/10.5281/zenodo.1016425. spencerzenithzenithapparent_zenithazimuthUnrecognized airmass modelghidnidhipoa_globalpoa_directpoa_diffusepoa_sky_diffusepoa_ground_diffuse']) return (poa_global, poa_direct, poa_diffuse, poa_sky_diffuse, poa_ground_diffuse)
r'''Calculates the amount of solar radiation and radiation reflected back the atmosphere which hits a surface at a specified tilt, and facing a specified azimuth. This functions is a wrapper for the incredibly comprehensive `pvlib library <https://github.com/pvlib/pvlib-python>`_, and requires it to be installed. Parameters ---------- latitude : float Latitude, between -90 and 90 [degrees] longitude : float Longitude, between -180 and 180, [degrees] Z : float, optional Elevation above sea level for the position, [m] moment : datetime Time and date for the calculation, in local UTC time (not daylight savings time), [-] surface_tilt : float The angle above the horizontal of the object being hit by radiation, [degrees] surface_azimuth : float The angle the object is facing (positive North eastwards 0° to 360°), [degrees] T : float, optional Temperature of atmosphere at ground level, [K] P : float, optional Pressure of atmosphere at ground level, [Pa] solar_constant : float, optional The amount of solar radiation which reaches earth's disk (at a standardized distance of 1 AU); this constant is independent of activity or conditions on earth, but will vary throughout the sun's lifetime and may increase or decrease slightly due to solar activity, [W/m^2] atmos_refract : float, optional Atmospheric refractivity at sunrise/sunset (0.5667 deg is an often used value; this varies substantially and has an impact of a few minutes on when sunrise and sunset is), [degrees] albedo : float, optional The average amount of reflection of the terrain surrounding the object at quite a distance; this impacts how much sunlight reflected off the ground, gest reflected back off clouds, [-] linke_turbidity : float, optional The amount of pollution/water in the sky versus a perfect clear sky; If not specified, this will be retrieved from a historical grid; typical values are 3 for cloudy, and 7 for severe pollution around a city, [-] extraradiation_method : str, optional The specified method 
to calculate the effect of earth's position on the amount of radiation which reaches earth according to the methods available in the `pvlib` library, [-] airmass_model : str, optional The specified method to calculate the amount of air the sunlight needs to travel through to reach the earth according to the methods available in the `pvlib` library, [-] cache : dict, optional Dictionary to to check for values to use to skip some calculations; `apparent_zenith`, `zenith`, `azimuth` supported, [-] Returns ------- poa_global : float The total irradiance in the plane of the surface, [W/m^2] poa_direct : float The total beam irradiance in the plane of the surface, [W/m^2] poa_diffuse : float The total diffuse irradiance in the plane of the surface, [W/m^2] poa_sky_diffuse : float The sky component of the diffuse irradiance, excluding the impact from the ground, [W/m^2] poa_ground_diffuse : float The ground-sky diffuse irradiance component, [W/m^2] Examples -------- >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0, ... surface_azimuth=180.0) (1065.7621896280812, 945.2656564506323, 120.49653317744884, 95.31535344213178, 25.181179735317063) >>> cache = {'apparent_zenith': 41.099082295767545, 'zenith': 41.11285376417578, 'azimuth': 182.5631874250523} >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 13, 43, 5), surface_tilt=41.0, ... linke_turbidity=3, T=300, P=1E5, ... surface_azimuth=180.0, cache=cache) (1042.5677703677097, 918.2377548545295, 124.33001551318027, 99.6228657378363, 24.70714977534396) At night, there is no solar radiation and this function returns zeros: >>> solar_irradiation(Z=1100.0, latitude=51.0486, longitude=-114.07, ... moment=datetime(2018, 4, 15, 2, 43, 5), surface_tilt=41.0, ... 
surface_azimuth=180.0) (0.0, -0.0, 0.0, 0.0, 0.0) Notes ----- The retrieval of `linke_turbidity` requires the pytables library (and Pandas); if it is not installed, specify a value of `linke_turbidity` to avoid the dependency. There is some redundancy of the calculated results, according to the following relations. The total irradiance is normally that desired for engineering calculations. poa_diffuse = poa_ground_diffuse + poa_sky_diffuse poa_global = poa_direct + poa_diffuse FOr a surface such as a pipe or vessel, an approach would be to split it into a number of rectangles and sum up the radiation absorbed by each. This calculation is fairly slow. References ---------- .. [1] Will Holmgren, Calama-Consulting, Tony Lorenzo, Uwe Krien, bmu, DaCoEx, mayudong, et al. Pvlib/Pvlib-Python: 0.5.1. Zenodo, 2017. https://doi.org/10.5281/zenodo.1016425.
371,128
def setPositionLinkedTo(self, widgets):
    """
    Sets the widget(s) that this popup will be linked to for positional
    changes.

    :param      widgets | <QWidget> || [<QWidget>, ..]
    """
    # An exact-type check is intentional here: only plain containers are
    # treated as pre-built widget lists.
    if type(widgets) in (list, set, tuple):
        linked = list(widgets)
    else:
        # Single widget: watch it and every ancestor for move events.
        linked = []
        ancestor = widgets
        while ancestor:
            ancestor.installEventFilter(self)
            linked.append(ancestor)
            ancestor = ancestor.parent()
    self._positionLinkedTo = linked
Sets the widget that this popup will be linked to for positional changes. :param widgets | <QWidget> || [<QWidget>, ..]
371,129
def run(command, parser, cl_args, unknown_args):
    """Restart a topology.

    :param command: CLI command being executed
    :param parser: argument parser (unused here)
    :param cl_args: dict of parsed command-line arguments
    :param unknown_args: extra unrecognized arguments (unused here)
    :return: result of the cli_helper invocation
    """
    Log.debug("Restart Args: %s", cl_args)
    # NOTE(review): the cl_args key literals below were stripped from this
    # record (empty subscripts are residue) -- restore before use.
    container_id = cl_args[]
    if cl_args[] == config.SERVER_MODE:
        dict_extra_args = {"container_id": str(container_id)}
        return cli_helper.run_server(command, cl_args, "restart topology",
                                     extra_args=dict_extra_args)
    else:
        list_extra_args = ["--container_id", str(container_id)]
        return cli_helper.run_direct(command, cl_args, "restart topology",
                                     extra_args=list_extra_args)
:param command: :param parser: :param cl_args: :param unknown_args: :return:
371,130
def compute_cumsum(
    df,
    id_cols: List[str],
    reference_cols: List[str],
    value_cols: List[str],
    new_value_cols: List[str] = None,
    cols_to_keep: List[str] = None
):
    """Compute a cumulative sum for a group of columns.

    Parameters
    ----------
    df : DataFrame to process
    id_cols : columns identifying each group
    reference_cols : columns ordering the cumulative sum
    value_cols : columns to accumulate
    new_value_cols : optional output column names (defaults to value_cols)
    cols_to_keep : other columns to keep (only safe when there is a single
        row per [id_cols + reference_cols] group)

    Raises
    ------
    ParamsValueError : when value_cols and new_value_cols differ in length.
    """
    if cols_to_keep is None:
        cols_to_keep = []
    if new_value_cols is None:
        new_value_cols = value_cols
    if len(value_cols) != len(new_value_cols):
        # Fix: the original raised with an empty message.
        raise ParamsValueError(
            '`value_cols` and `new_value_cols` must have the same number '
            'of elements ({} != {})'.format(len(value_cols), len(new_value_cols)))
    check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)
    # Group levels 0..len(id_cols)-1 correspond to the id columns after the
    # multi-column groupby below.
    levels = list(range(len(id_cols)))
    df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
    df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()
    return df.reset_index()
Compute cumsum for a group of columns. --- ### Parameters *mandatory :* - `id_cols` (*list*): the columns id to create each group - `reference_cols` (*list*): the columns to order the cumsum - `value_cols` (*list*): the columns to cumsum *optional :* - `new_value_cols` (*list*): the new columns with the result cumsum - `cols_to_keep` (*list*): other columns to keep in the dataset. This option can be used if there is only one row by group [id_cols + reference_cols] --- ### Example **Input** MONTH | DAY | NAME | VALUE | X :---:|:---:|:--:|:---:|:---: 1 | 1 | A | 1 | lo 2 | 1 | A | 1 | lo 2 | 15 | A | 1 | la 1 | 15 | B | 1 | la ```cson compute_cumsum: id_cols: ['NAME'] reference_cols: ['MONTH', 'DAY'] cumsum_cols: ['VALUE'] cols_to_keep: ['X'] ``` **Output** NAME | MONTH | DAY | X | VALUE :---:|:---:|:--:|:---:|:---: A | 1 | 1 | lo | 1 A | 2 | 1 | la | 2 A | 2 | 15 | lo | 3 B | 1 | 15 | la | 1
371,131
def on_cache_changed(self, direct, which=None):
    """Callback that sets local dirty flags when elements of some cached
    inputs change.

    This gets 'hooked up' to the inputs when we cache them; when their
    elements change we mark the dependent caches as changed here.
    """
    for candidate in (direct, which):
        key = self.id(candidate)
        _unused, cache_ids = self.cached_input_ids.get(key, [None, []])
        for cache_id in cache_ids:
            self.inputs_changed[cache_id] = True
A callback function, which sets local flags when the elements of some cached inputs change. This function gets 'hooked up' to the inputs when we cache them, and upon their elements being changed we update here.
371,132
def byte_number_string(
    number, thousandsSep=True, partition=False, base1024=True, appendBytes=True
):
    """Convert a byte count into a human-readable string.

    :param number: byte count (int)
    :param thousandsSep: insert thousands separators in the number
    :param partition: reduce the number with a K/M/G/T/P magnitude suffix
    :param base1024: use 1024 (shift) steps instead of 1000 (division)
    :param appendBytes: append " Byte"/" Bytes" to the result
    :return: formatted string
    """
    magsuffix = ""
    bytesuffix = ""
    if partition:
        magnitude = 0
        if base1024:
            while number >= 1024:
                magnitude += 1
                number = number >> 10
        else:
            # Base-1000 path produces a float (e.g. 1500 -> 1.5).
            while number >= 1000:
                magnitude += 1
                number /= 1000.0
        magsuffix = ["", "K", "M", "G", "T", "P"][magnitude]
    if appendBytes:
        bytesuffix = " Byte" if number == 1 else " Bytes"
    if thousandsSep and (number >= 1000 or magsuffix):
        # Fix: "{:,d}" raised ValueError for the float produced by the
        # base-1000 branch above; "{:,}" handles both int and float.
        snum = "{:,}".format(number)
    else:
        snum = str(number)
    return "{}{}{}".format(snum, magsuffix, bytesuffix)
Convert bytes into human-readable representation.
371,133
def extend_name(suffix):
    """A factory for class decorators that appends *suffix* to the
    decorated class's name.

    Example:

        @extend_name('_Foo')
        class Class:
            pass

        assert Class.__name__ == 'Class_Foo'
    """
    def dec(cls):
        # Restored the stripped literals: rebuild the name and rebind
        # __name__ on the existing class object.
        name = '{}{}'.format(cls.__name__, suffix)
        setattr(cls, '__name__', name)
        return cls
    return dec
A factory for class decorators that modify the class name by appending some text to it. Example: @extend_name('_Foo') class Class: pass assert Class.__name__ == 'Class_Foo'
371,134
def _write_local_data_files(self, cursor):
    """Take a database cursor and write its results to local temp files.

    :return: a dict mapping GCS object names (self.filename formatted with
        a file counter) to handles of local files holding the data.
    """
    # NOTE(review): the .replace() arguments and the newline bytes literal
    # were stripped from this record -- restore before use.
    schema = list(map(lambda schema_tuple: schema_tuple[0].replace(, ), cursor.description))
    file_no = 0
    tmp_file_handle = NamedTemporaryFile(delete=True)
    tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
    for row in cursor:
        # Convert DB-specific values into JSON-serializable ones.
        row = map(self.convert_types, row)
        row_dict = dict(zip(schema, row))
        s = json.dumps(row_dict, sort_keys=True)
        s = s.encode()
        tmp_file_handle.write(s)
        # Newline-delimited JSON: one object per line.
        tmp_file_handle.write(b)
        # Roll over to a new temp file once the current one grows too big.
        if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
            file_no += 1
            tmp_file_handle = NamedTemporaryFile(delete=True)
            tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
    return tmp_file_handles
Takes a cursor, and writes results to a local file. :return: A dictionary where keys are filenames to be used as object names in GCS, and values are file handles to local files that contain the data for the GCS objects.
371,135
def fsqrt(q):
    """Given a non-negative fraction *q*, return a pair (a, b) such that
    q == a * a * b where b is a square-free integer.

    If q is a perfect square, a is its square root and b is one.
    """
    if q == 0:
        return q, 1
    if q < 0:
        # NOTE(review): the error-message format string was stripped from
        # this record -- restore before use.
        raise ValueError( % q)
    # NOTE(review): isqrt here appears to return a (root, remainder) pair
    # rather than the stdlib math.isqrt -- confirm against its definition.
    a, b = isqrt(q.numerator)
    c, d = isqrt(q.denominator)
    return Fraction(a, c * d), b * d
given a non-negative fraction q, return a pair (a,b) such that q = a * a * b where b is a square-free integer. if q is a perfect square, a is its square root and b is one.
371,136
def get_page_objects_by_ext_type(context, object_type):
    """Return the page objects registered for *object_type*.

    :param context: template context holding the ext-type object map
    :param object_type: object type key to look up
    :raises template.TemplateSyntaxError: when the type is unknown
    :return: the selected objects
    """
    # NOTE(review): the context key literals and the error format string
    # were stripped from this record -- restore before use.
    try:
        objects = context[][][object_type]
    except KeyError:
        raise template.TemplateSyntaxError(.format(object_type))
    return objects
**Arguments** ``object_type`` object type :return selected objects
371,137
def patch_traces(
    self,
    project_id,
    traces,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Send new traces to Stackdriver Trace or update existing traces.

    If a sent trace ID matches an existing trace, existing fields are
    overwritten by the provided values and new fields are merged in;
    otherwise a new trace is created.

    Args:
        project_id (str): ID of the Cloud project storing the trace data.
        traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): the
            body of the message.
        retry (Optional[google.api_core.retry.Retry]): retry policy;
            ``None`` disables retries.
        timeout (Optional[float]): per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): extra metadata
            passed to the method.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: if the request
            failed for any reason.
        google.api_core.exceptions.RetryError: if retry attempts failed.
        ValueError: if the parameters are invalid.
    """
    # Wrap the transport method once and cache the wrapped callable.
    if "patch_traces" not in self._inner_api_calls:
        self._inner_api_calls[
            "patch_traces"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.patch_traces,
            default_retry=self._method_configs["PatchTraces"].retry,
            default_timeout=self._method_configs["PatchTraces"].timeout,
            client_info=self._client_info,
        )
    request = trace_pb2.PatchTracesRequest(project_id=project_id, traces=traces)
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # Attach a routing header so the backend can route by project id.
    try:
        routing_header = [("project_id", project_id)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)
    self._inner_api_calls["patch_traces"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Sends new traces to Stackdriver Trace or updates existing traces. If the ID of a trace that you send matches that of an existing trace, any fields in the existing trace and its spans are overwritten by the provided values, and any new fields provided are merged with the existing trace data. If the ID does not match, a new trace is created. Example: >>> from google.cloud import trace_v1 >>> >>> client = trace_v1.TraceServiceClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `traces`: >>> traces = {} >>> >>> client.patch_traces(project_id, traces) Args: project_id (str): ID of the Cloud project where the trace data is stored. traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.trace_v1.types.Traces` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
371,138
def check_lengths(*arrays):
    """Ensure that all input arrays share the same number of samples.

    Parameters
    ----------
    *arrays : iterable of array-likes to be checked

    Returns
    -------
    None

    Raises
    ------
    ValueError : if the arrays do not all have the same length.
    """
    lengths = [len(array) for array in arrays]
    if len(np.unique(lengths)) > 1:
        # Fix: the original raised with a bare .format() call whose format
        # string had been stripped; restore a meaningful message.
        raise ValueError('Inconsistent data lengths: {}'.format(lengths))
tool to ensure input and output data have the same number of samples Parameters ---------- *arrays : iterable of arrays to be checked Returns ------- None
371,139
def apply_update(self, doc, update_spec):
    """Override DocManagerBase.apply_update to operate on flat documents.

    NOTE(review): the '$set'/'$unset' key literals and the dotted-path
    separator literal were stripped from this record (empty ``in`` /
    subscripts below); the '$unset' loop also appears truncated (it reuses
    keys_to_pop/key without rebuilding them). Restore from the original
    source before use.
    """
    if not in update_spec and not in update_spec:
        update_spec[] = doc[self.unique_key]
        return update_spec
    for to_set in update_spec.get("$set", []):
        value = update_spec[][to_set]
        keys_to_pop = []
        for key in doc:
            # Drop the key itself and any dotted children it shadows.
            if key.startswith(to_set):
                if key == to_set or key[len(to_set)] == :
                    keys_to_pop.append(key)
        for key in keys_to_pop:
            doc.pop(key)
        doc[to_set] = value
    for to_unset in update_spec.get("$unset", []):
        keys_to_pop.append(key)
        for key in keys_to_pop:
            doc.pop(key)
    return doc
Override DocManagerBase.apply_update to have flat documents.
371,140
def register(cls, use_admin=True):
    """Register a sandman model class (or list/tuple of classes) with the
    API and its associated endpoint.

    :param cls: class derived from sandman.model.Model, or a list/tuple
        of such classes
    :param use_admin: whether to expose the class in the admin interface
    """
    with app.app_context():
        # NOTE(review): the attribute-name literal in this getattr call was
        # stripped from this record (presumably 'class_references' given
        # the assignment below) -- restore before use.
        if getattr(current_app, , None) is None:
            current_app.class_references = {}
        if isinstance(cls, (list, tuple)):
            for entry in cls:
                register_internal_data(entry)
                entry.use_admin = use_admin
        else:
            register_internal_data(cls)
            cls.use_admin = use_admin
Register with the API a :class:`sandman.model.Model` class and associated endpoint. :param cls: User-defined class derived from :class:`sandman.model.Model` to be registered with the endpoint returned by :func:`endpoint()` :type cls: :class:`sandman.model.Model` or tuple
371,141
def get_owner(self, default=True):
    """Return the (User ID, Group ID) tuple.

    :param bool default: fall back to the current process's uid/gid for
        any unset (falsy) value.
    :rtype: tuple[int, int]
    """
    uid, gid = self.owner
    if default:
        uid = uid or os.getuid()
        gid = gid or os.getgid()
    return uid, gid
Return (User ID, Group ID) tuple :param bool default: Whether to return default if not set. :rtype: tuple[int, int]
371,142
def is_email():
    """Build a validator asserting that a field's value is a valid email
    address.
    """
    # NOTE(review): the regex fragments (originally ur'' literals -- a
    # Python-2-only prefix) were stripped from this record; the bare `ur`
    # tokens below are residue and must be restored before use.
    email = (
        ur
        ur
        ur
        ur
        ur
        ur
        ur
        ur
    )
    regex = re.compile(email, re.IGNORECASE | re.UNICODE)

    def validate(value):
        # Returns an error object on failure, None on success.
        if not regex.match(value):
            return e("{} is not a valid email address", value)

    return validate
Validates that a fields value is a valid email address.
371,143
def update_user(self, user_id, **kwargs):
    """Update a user.

    :param user_id: the unique ID of the user.
    :type user_id: str
    :param kwargs: snake_case property names mapped to their new values.
    """
    properties = {}
    for attr, value in kwargs.items():
        # The API expects camelCase property names.
        properties[self._underscore_to_camelcase(attr)] = value
    data = {
        "properties": properties
    }
    # NOTE(review): the url format string and the HTTP method literal were
    # stripped from this record -- restore before use.
    response = self._perform_request(
        url= % user_id,
        method=,
        data=json.dumps(data))
    return response
Updates a user. :param user_id: The unique ID of the user. :type user_id: ``str``
371,144
def build_tree(self, *args, **kwargs):
    """Dispatch a tree-build call to the configured backend.

    At least four taxa are required to express evolutionary history on an
    unrooted tree.
    """
    assert len(self) > 3
    # NOTE(review): `kwargs.pop(kwargs, None)` looks wrong -- it should
    # presumably pop the (stripped) 'algorithm' key literal; the
    # algorithm-name string literals below were also stripped, and the
    # `is` comparisons should be `==`. Restore before use.
    algorithm = kwargs.pop(kwargs, None)
    if algorithm is None:
        algorithm =
    if algorithm is :
        return self.build_tree_raxml(*args, **kwargs)
    if algorithm is :
        return self.build_tree_fast(*args, **kwargs)
Dispatch a tree build call. Note that you need at least four taxa to express some evolutionary history on an unrooted tree.
371,145
def mqtt_connected(func):
    """MQTTClient coroutine decorator which waits until the client is
    connected before calling the decorated coroutine.

    :param func: coroutine to be called once connected
    :return: wrapped coroutine
    :raises ClientException: when the client will not reconnect.
    """
    # Fix: @asyncio.coroutine / `yield from` were removed in Python 3.11,
    # and asyncio.wait() no longer accepts bare coroutines since 3.12 --
    # use an async def wrapper and wrap the waiters in tasks.
    @wraps(func)
    async def wrapper(self, *args, **kwargs):
        if not self._connected_state.is_set():
            base_logger.warning("Client not connected, waiting for it")
            waiters = [
                asyncio.ensure_future(self._connected_state.wait()),
                asyncio.ensure_future(self._no_more_connections.wait()),
            ]
            _, pending = await asyncio.wait(
                waiters, return_when=asyncio.FIRST_COMPLETED)
            for t in pending:
                t.cancel()
            if self._no_more_connections.is_set():
                raise ClientException("Will not reconnect")
        return await func(self, *args, **kwargs)
    return wrapper
MQTTClient coroutines decorator which will wait until connection before calling the decorated method. :param func: coroutine to be called once connected :return: coroutine result
371,146
def is_dot(ip):
    """Return True if *ip* is an IPv4 address in dotted decimal notation
    (exactly four octets, each an integer in 0-255).
    """
    # Fix: the '.' separator literal had been stripped, leaving split()
    # (whitespace splitting) which cannot parse dotted notation.
    octets = str(ip).split('.')
    if len(octets) != 4:
        return False
    for octet in octets:
        try:
            value = int(octet)
        except ValueError:
            return False
        if value > 255 or value < 0:
            return False
    return True
Return true if the IP address is in dotted decimal notation.
371,147
def export(self, file_path=None, export_format=None):
    """Write the users to a file in the requested format.

    :param file_path: destination path
    :param export_format: output format selector
    :return: True on success
    """
    # NOTE(review): the file mode literal and the format-name literals were
    # stripped from this record -- restore before use.
    with io.open(file_path, mode=, encoding="utf-8") as export_file:
        if export_format == :
            import yaml
            yaml.safe_dump(self.to_dict(), export_file, default_flow_style=False)
        elif export_format == :
            export_file.write(text_type(json.dumps(self.to_dict(), ensure_ascii=False)))
    return True
Write the users to a file.
371,148
def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor=):
    """Plot a histogram from the data and return the AxesSubplot object.

    Parameters
    ----------
    series : Series -- the data to plot
    bins : int -- number of histogram bins
    figsize : tuple -- figure size (width, height) in inches
    facecolor : str -- color code (default literal stripped from record)

    Returns
    -------
    matplotlib.AxesSubplot
    """
    # NOTE(review): the default facecolor, the ylabel literal, and the body
    # of the except-clause were stripped/mangled in this record (the bare
    # `facecolor=facecolor, bins=bins)` line is residue) -- restore before
    # use. Also note `plot` is only assigned on the date-type branch.
    if base.get_vartype(series) == base.TYPE_DATE:
        fig = plt.figure(figsize=figsize)
        plot = fig.add_subplot(111)
        plot.set_ylabel()
        try:
            plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)
        except TypeError:
            facecolor=facecolor, bins=bins)
    return plot
Plot an histogram from the data and return the AxesSubplot object. Parameters ---------- series : Series The data to plot figsize : tuple The size of the figure (width, height) in inches, default (6,4) facecolor : str The color code. Returns ------- matplotlib.AxesSubplot The plot.
371,149
def save_performance(db, job_id, records):
    """Save performance information about the given job in the database.

    :param db: an openquake.server.dbapi.Db instance
    :param job_id: a job ID
    :param records: a list of performance records
    """
    # NOTE(review): the record-field key literals, the table name and the
    # column-list string were stripped from this record -- restore before
    # use.
    rows = [(job_id, rec[], rec[], rec[], int(rec[]))
            for rec in records]
    db.insert(, .split(), rows)
Save in the database the performance information about the given job. :param db: a :class:`openquake.server.dbapi.Db` instance :param job_id: a job ID :param records: a list of performance records
371,150
def needs_sync(self):
    """Check whether enough time has elapsed to perform a sync().

    A sync() should happen every now and then regardless of what
    has_state_changed() says -- this is a safety net to enforce
    consistency if the state gets messed up.

    :rtype: boolean
    """
    now = time.time()
    if self.lastforce is None:
        # First call: start the countdown from now.
        self.lastforce = now
    return now - self.lastforce >= self.forceipchangedetection_sleep
Check if enough time has elapsed to perform a sync(). A call to sync() should be performed every now and then, no matter what has_state_changed() says. This is really just a safety thing to enforce consistency in case the state gets messed up. :rtype: boolean
371,151
def unique_cpx_roots(rlist, tol=0.001):
    """Return the unique roots of *rlist* together with their
    multiplicities.

    Roots closer than *tol* are merged; the running average of the merged
    values is used when the multiplicity is greater than one.

    Parameters
    ----------
    rlist : sequence of (complex) root values
    tol : float, merge tolerance

    Returns
    -------
    (np.ndarray, np.ndarray) : unique roots and their multiplicities

    Mark Wickert October 2016
    """
    uniq = [rlist[0]]
    mult = [1]
    for k in range(1, len(rlist)):
        for m in range(len(uniq)):
            if abs(rlist[k] - uniq[m]) <= tol:
                mult[m] += 1
                # Running average of all values merged into root m.
                uniq[m] = (uniq[m] * (mult[m] - 1) + rlist[k]) / float(mult[m])
                break
        else:
            # Fix: append a new unique root ONLY when no existing root
            # matched -- the original appended unconditionally, so merged
            # roots were also re-added as new entries.
            uniq.append(rlist[k])
            mult.append(1)
    return np.array(uniq), np.array(mult)
The average of the root values is used when multiplicity is greater than one. Mark Wickert October 2016
371,152
def xdg_config_dir():
    """Return this application's configuration directory following the XDG
    Base Directory convention.
    """
    # NOTE(review): the env-var name (presumably XDG_CONFIG_HOME), the
    # '~/.config' fallback literal and the application subdirectory name
    # were stripped from this record -- restore before use.
    xdg_config = os.getenv(, os.path.expanduser())
    xdg_config_directory = os.path.join(xdg_config, )
    return xdg_config_directory
Check xdg locations for config files
371,153
def set_generator_training_nb(self, number):
    """Set ``self.samples_per_epoch`` (used by ``model.fit`` when the
    input is a generator), rounding *number* up to a whole multiple of
    the batch size.

    :param number: requested number of samples per epoch
    :return: None
    """
    self.samples_per_epoch = number
    remainder = number % self.get_batch_size()
    if remainder:
        # Pad up to the next full batch.
        self.samples_per_epoch += self.get_batch_size() - remainder
sets self.samples_per_epoch which is used in model.fit if input is a generator :param number: :return:
371,154
def get_input(prompt, check, *, redo_prompt=None, repeat_prompt=False):
    """Ask the user for terminal-level input, validate the response
    against *check* (a string, a tuple of options, or a callable) and
    re-prompt until the response validates.
    """
    if isinstance(check, str):
        check = (check,)
    to_join = []
    for item in check:
        if item:
            to_join.append(str(item))
        else:
            to_join.append("")
    # NOTE(review): the join separator literal was stripped from this
    # record -- restore before use.
    prompt += " [{}]: ".format(.join(to_join))
    if repeat_prompt:
        redo_prompt = prompt
    elif not redo_prompt:
        redo_prompt = "Incorrect input, please choose from {}: " \
                      "".format(str(check))
    if callable(check):
        def _checker(r):
            return check(r)
    elif isinstance(check, tuple):
        def _checker(r):
            return r in check
    else:
        raise ValueError(RESPONSES_ERROR.format(type(check)))
    response = input(prompt)
    while not _checker(response):
        print(response, type(response))
        response = input(redo_prompt if redo_prompt else prompt)
    return response
Ask the user to input something on the terminal level, check their response and ask again if they didn't answer correctly
371,155
def gauge(self, name, value, rate=1):
    """Send a Gauge metric with the specified value.

    :param name: metric name
    :param value: gauge value; coerced to float when not numeric
    :param rate: sample rate, may cause the metric to be skipped
    """
    if not self._should_send_metric(name, rate):
        return
    if not is_numeric(value):
        value = float(value)
    metric = Gauge(self._create_metric_name_for_request(name), value, rate)
    self._request(metric.to_request())
Send a Gauge metric with the specified value
371,156
def addElement(self, etype=, corners=[-1.0, -1.0, -1.0, 1., -1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0, -1.0, -1.0, 1.0, 1.0, -1.0, 1.0, 1.0, 1.0, 1.0, -1.0, 1.0, 1.0], name=):
    """Append one element built from *corners* (nodal coordinates, ordered
    counter-clockwise for the element type) and register it in the
    element/node/facet sets derived from *name*.
    """
    # NOTE(review): the default etype/name literals, the set-name prefix
    # strings (the bare `[+name]` subscripts are residue) and the element
    # type comparison literals were stripped from this record -- restore
    # before use. Also note the mutable list default argument.
    lastelm = self.elements[-1][1]
    lastnode = self.nodes[-1][0]
    elm = [etype, lastelm + 1]
    # One node id per (x, y, z) triple in corners.
    for i in range(old_div(len(corners), 3)):
        elm.append(lastnode + 1 + i)
    self.elements.append(elm)
    self.elsets[+name] = {}
    self.elsets[+name][int(elm[1])] = True
    cnt = 1
    self.nsets[+name] = []
    for i in range(0, len(corners), 3):
        self.nodes.append([lastnode + cnt, corners[i], corners[i + 1], corners[i + 2]])
        self.nsets[+name].append(lastnode + cnt)
        cnt += 1
    if etype == or etype == :
        self.fsets[+name] = [[etype, MeshDef.facetID, lastnode + 1, lastnode + 2, lastnode + 3, lastnode + 4]]
        MeshDef.facetID += 1
corners - list of nodal coordinates properly ordered for element type (counter clockwise)
371,157
def first_produced_mesh(self):
    """The first produced mesh.

    :return: the first produced mesh
    :rtype: knittingpattern.Mesh.Mesh
    :raises IndexError: if no mesh is produced

    .. seealso:: :attr:`number_of_produced_meshes`
    """
    for candidate in self.instructions:
        if not candidate.produces_meshes():
            continue
        return candidate.first_produced_mesh
    raise IndexError("{} produces no meshes".format(self))
The first produced mesh. :return: the first produced mesh :rtype: knittingpattern.Mesh.Mesh :raises IndexError: if no mesh is produced .. seealso:: :attr:`number_of_produced_meshes`
371,158
def pinyin_to_ipa(s):
    """Convert all Pinyin syllables in *s* to IPA.

    Spaces are added between connected syllables and syllable-separating
    apostrophes are removed.
    """
    return _convert(s, zhon.pinyin.syllable, pinyin_syllable_to_ipa,
                    remove_apostrophes=True, separate_syllables=True)
Convert all Pinyin syllables in *s* to IPA. Spaces are added between connected syllables and syllable-separating apostrophes are removed.
371,159
def pre_release(self):
    """Return True if this version is a pre-release."""
    # NOTE(review): the version_info key literals were stripped from this
    # record -- restore before use.
    label = self.version_info.get(, None)
    pre = self.version_info.get(, None)
    # Pre-release iff both the label and the pre-release marker are set.
    return True if (label is not None and pre is not None) else False
Return true if version is a pre-release.
371,160
def unsubscribe(self, coro):
    """Unsubscribe from status updates from the OpenTherm Gateway.

    Can only be used after connect(). @coro is a coroutine which has
    been subscribed with subscribe() earlier.

    Return True on success, False if not connected or subscribed.
    """
    try:
        self._notify.remove(coro)
    except ValueError:
        # Not subscribed.
        return False
    return True
Unsubscribe from status updates from the OpenTherm Gateway. Can only be used after connect(). @coro is a coroutine which has been subscribed with subscribe() earlier. Return True on success, False if not connected or subscribed.
371,161
def send_start(remote, code, device=None, address=None):
    """Start sending an IR code via irsend.

    All parameters are passed to irsend; see the irsend man page for
    details about their usage.

    Parameters
    ----------
    remote : str
    code : str
    device : str
    address : str

    Notes
    -----
    No attempt is made to catch or handle errors. See the documentation
    for subprocess.check_output for the exceptions it may raise.
    """
    # NOTE(review): the irsend directive literal (presumably 'SEND_START')
    # was stripped from this record -- restore before use.
    args = [, remote, code]
    _call(args, device, address)
All parameters are passed to irsend. See the man page for irsend for details about their usage. Parameters ---------- remote: str code: str device: str address: str Notes ----- No attempt is made to catch or handle errors. See the documentation for subprocess.check_output to see the types of exceptions it may raise.
371,162
def p_parens_expr(p):
    # PLY grammar rule. The rule text ("expr : LPAREN expr RPAREN")
    # normally lives in this function's DOCSTRING and was stripped from
    # this record -- it must be restored as a docstring (not a comment)
    # for the parser generator to pick it up.
    p[0] = node.expr(op="parens", args=node.expr_list([p[2]]))
expr : LPAREN expr RPAREN
371,163
def get_number_of_partitions_for(self, ar):
    """Return the number of selected partitions for the given AR."""
    uid = api.get_uid(ar)
    num = self.request.get("primary", {}).get(uid)
    if num is None:
        # Not in the request: fall back to the template, then the default.
        template = ar.getTemplate()
        num = len(template.getPartitions()) if template \
            else DEFAULT_NUMBER_OF_PARTITIONS
    try:
        return int(num)
    except (TypeError, ValueError):
        return DEFAULT_NUMBER_OF_PARTITIONS
Return the number of selected partitions
371,164
def _sort(values, axis=-1, direction=, stable=False, name=None): if direction == : pass elif direction == : values = np.negative(values) else: raise ValueError(.format(direction)) result = np.sort(values, axis, kind= if stable else ) if direction == : return np.negative(result) return result
Numpy implementation of `tf.sort`.
371,165
def rescan(self):
    """Check files and directories on the watchlist for updates and
    rescan them for new data products; return any found.

    Entries in directories whose watching state is Purr.UNWATCHED are
    skipped.
    """
    if not self.attached:
        return
    dprint(5, "starting rescan")
    newstuff = {};
    self.last_scan_timestamp = time.time()
    # Pass 1: poll every registered watcher for new files.
    for path, watcher in list(self.watchers.items()):
        newfiles = watcher.newFiles()
        # newFiles() returns None when the watched path is inaccessible.
        if newfiles is None:
            if watcher.survive_deletion:
                dprintf(5, "access error on %s, but will still be watched\n", watcher.path)
            else:
                dprintf(2, "access error on %s, will no longer be watched\n", watcher.path)
                del self.watchers[path]
            if not watcher.disappeared:
                self.emit(SIGNAL("disappearedFile"), path)
                watcher.disappeared = True
            continue
        dprintf(5, "%s: %d new file(s)\n", watcher.path, len(newfiles))
        # Skip files that are already watched in their own right.
        newfiles = [p for p in newfiles if p is path or p not in self.watchers]
        # Drop files that live in unwatched directories.
        newfiles = [filename for filename in newfiles
                    if self._watching_state.get(os.path.dirname(filename)) > Purr.UNWATCHED]
        for newfile in newfiles:
            # Decide whether the new product should be loaded quietly.
            if watcher.quiet or self._watching_state.get(os.path.dirname(newfile)) < Purr.POUNCE:
                quiet = True
            else:
                quiet = matches_patterns(os.path.basename(newfile), self._quiet_patterns)
            newstuff[newfile] = quiet and newstuff.get(newfile, True)
            dprintf(4, "%s: new data product, quiet=%d (watcher quiet: %s)\n",
                    newfile, quiet, watcher.quiet)
            self.temp_watchers[newfile] = Purrer.WatchedFile(newfile)
    # Pass 2: drop temporary watchers whose files have vanished.
    for path, watcher in list(self.temp_watchers.items()):
        if watcher.newFiles() is None:
            dprintf(2, "access error on %s, marking as disappeared", watcher.path)
            del self.temp_watchers[path]
            self.emit(SIGNAL("disappearedFile"), path)
    return self.makeDataProducts(iter(newstuff.items()))
Checks files and directories on watchlist for updates, rescans them for new data products. If any are found, returns them. Skips those in directories whose watchingState is set to Purr.UNWATCHED.
371,166
def inspect_image(self, image_name, image_tag=):
    """Retrieve the settings of a docker image.

    :param image_name: string with name or id of image
    :param image_tag: [optional] string with tag associated with image
    :return: dictionary of image settings
    """
    # NOTE(review): the default image_tag literal, the title/command
    # format strings and the field-validation path literals were stripped
    # from this record -- restore before use.
    title = % self.__class__.__name__
    input_fields = {
        : image_name,
        : image_tag
    }
    for key, value in input_fields.items():
        if value:
            object_title = % (title, key, str(value))
            self.fields.validate(value, % key, object_title)
    sys_arg = image_name
    if image_tag:
        sys_arg += % image_tag
    import json
    sys_command = % sys_arg
    output_dict = json.loads(self.command(sys_command))
    # `docker inspect` returns a list; the image settings are element 0.
    image_settings = output_dict[0]
    return image_settings
a method to retrieve the settings of an image :param image_name: string with name or id of image :param image_tag: [optional] string with tag associated with image :return: dictionary of settings of image { TOO MANY TO LIST }
371,167
def _ceil(self, address): return (((address - 1) + self.page_size) & ~self.page_mask) & self.memory_mask
Returns the smallest page boundary value not less than the address. :rtype: int :param address: the address to calculate its ceil. :return: the ceil of C{address}.
371,168
def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:
    """Merge the current ontology into the given master config.

    :param config: master config, a JSON str or a dict
    :param defaults: dict providing default 'color' and 'icon' values
    :param delete_orphan_fields: drop config fields whose property no
        longer exists in the ontology
    :return: merged master config as a dict
    """
    # NOTE(review): many key literals (the 'fields' key, the per-field
    # default dict keys, the color/icon attr names, etc.) were stripped
    # from this record -- restore before use. Also note the mutable dict
    # default argument.
    if isinstance(config, str):
        import json
        config = json.loads(config)
    properties = self.all_properties()
    config[] = config.get(, dict())
    fields = config[]
    d_color = defaults.get(, )
    d_icon = defaults.get(, )
    if delete_orphan_fields:
        # Remove config fields that have no matching ontology property.
        exist = {p.name() for p in properties}
        unexist = set(fields.keys()) - exist
        for name in unexist:
            del fields[name]
    for p in properties:
        field = fields.get(p.name(), {: False, : False, : 0, : [], : False,
                                      : False, : , : [], : , : False,
                                      : 1, : , : False, : , : })
        config[][p.name()] = field
        field[] = .join(p.label())
        field[] = .join(p.definition())
        field[] = p.name()
        if not in field:
            # Inherit the color of the closest documented ancestor.
            color = self.__merge_close_ancestor_color(p, fields, attr=)
            field[] = color if color else d_color
        if not in field:
            icon = self.__merge_close_ancestor_color(p, fields, attr=)
            field[] = icon if icon else d_icon
        if isinstance(p, OntologyObjectProperty):
            field[] =
        else:
            try:
                field[] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))
            except StopIteration:
                field[] = None
    return config
Merge current ontology with input master config. :param config: master config, should be str or dict :param defaults: a dict that sets default color and icon :param delete_orphan_fields: if a property doesn't exist in the ontology then delete it :return: merged master config in dict
371,169
def new_workspace(self, name=None, layout=None, workspace_id=None, index=None) -> WorkspaceLayout.WorkspaceLayout:
    """Create a new workspace, insert it into the document model, and
    return it."""
    workspace = WorkspaceLayout.WorkspaceLayout()
    insert_at = len(self.document_model.workspaces) if index is None else index
    self.document_model.insert_workspace(insert_at, workspace)
    # Default layout: a single selected image panel.
    default_layout = create_image_desc()
    default_layout["selected"] = True
    workspace.layout = default_layout if layout is None else layout
    workspace.name = _("Workspace") if name is None else name
    if workspace_id:
        workspace.workspace_id = workspace_id
    return workspace
Create a new workspace, insert into document_model, and return it.
371,170
def decode_list(input_props, name):
    """Decode a space-separated list stored under *name* in *input_props*.

    Returns an empty list when the property is missing or empty.
    """
    raw = input_props.get(name)
    if not raw:
        return []
    return raw.split(" ")
Decodes a space-separated list
371,171
def shadowUpdate(self, srcJSONPayload, srcCallback, srcTimeout):
    """Update the device shadow JSON document in AWS IoT by publishing
    the provided JSON document to the corresponding shadow topics.

    The shadow response topics are subscribed to so the result is
    delivered to *srcCallback* as
    ``customCallback(payload, responseStatus, token)``; if no response
    arrives within *srcTimeout*, a timeout notification is delivered
    instead.

    :param srcJSONPayload: JSON document string used to update the
        shadow document in AWS IoT.
    :param srcCallback: function invoked with the response.
    :param srcTimeout: seconds before the request is considered timed out.
    :return: the client token used for tracing this request.
    :raises ValueError: if the payload is not valid JSON.
    """
    self._basicJSONParserHandler.setString(srcJSONPayload)
    if self._basicJSONParserHandler.validateJSON():
        with self._dataStructureLock:
            # Allocate a tracing token and arm its timeout timer.
            currentToken = self._tokenHandler.getNextToken()
            self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["update", currentToken])
            self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken)
            JSONPayloadWithToken = self._basicJSONParserHandler.regenerateString()
            self._shadowSubscribeCallbackTable["update"] = srcCallback
            self._shadowSubscribeStatusTable["update"] += 1
            # Subscribe to accepted/rejected topics unless already done.
            if not self._isPersistentSubscribe or not self._isUpdateSubscribed:
                self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "update", self.generalCallback)
                self._isUpdateSubscribed = True
                self._logger.info("Subscribed to update accepted/rejected topics for deviceShadow: " + self._shadowName)
            self._shadowManagerHandler.basicShadowPublish(self._shadowName, "update", JSONPayloadWithToken)
            self._tokenPool[currentToken].start()
    else:
        raise ValueError("Invalid JSON file.")
    return currentToken
**Description** Update the device shadow JSON document string from AWS IoT by publishing the provided JSON document to the corresponding shadow topics. Shadow response topics will be subscribed to receive responses from AWS IoT regarding the result of the get operation. Response will be available in the registered callback. If no response is received within the provided timeout, a timeout notification will be passed into the registered callback. **Syntax** .. code:: python # Update the shadow JSON document from AWS IoT, with a timeout set to 5 seconds BotShadow.shadowUpdate(newShadowJSONDocumentString, customCallback, 5) **Parameters** *srcJSONPayload* - JSON document string used to update shadow JSON document in AWS IoT. *srcCallback* - Function to be called when the response for this shadow request comes back. Should be in form :code:`customCallback(payload, responseStatus, token)`, where :code:`payload` is the JSON document returned, :code:`responseStatus` indicates whether the request has been accepted, rejected or is a delta message, :code:`token` is the token used for tracing in this request. *srcTimeout* - Timeout to determine whether the request is invalid. When a request gets timeout, a timeout notification will be generated and put into the registered callback to notify users. **Returns** The token used for tracing in this shadow request.
371,172
def secret_file(filename):
    """Check the permissions of things which really should be secret
    files.

    The file must be a regular file or a symlink, and (outside Windows)
    must not be other-readable, other-writable or group-writable.

    :raises aomi.exceptions.AomiFile: on any violation.
    """
    filestat = os.stat(abspath(filename))
    mode = filestat.st_mode
    if not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
        e_msg = "Secret file %s must be a real file or symlink" % filename
        raise aomi.exceptions.AomiFile(e_msg)
    if platform.system() != "Windows":
        # Any of: other-read, other-write, group-write is too loose.
        loose_bits = stat.S_IROTH | stat.S_IWOTH | stat.S_IWGRP
        if mode & loose_bits:
            e_msg = "Secret file %s has too loose permissions" % filename
            raise aomi.exceptions.AomiFile(e_msg)
Will check the permissions of things which really should be secret files
371,173
def compile_loaderplugin_entry(self, spec, entry):
    """Generic loader plugin entry handler.

    The default implementation assumes that everything up to the first
    '!' in the module name resolves to a loader plugin known to the
    registry stored in the spec under CALMJS_LOADERPLUGIN_REGISTRY.
    """
    modname, source, target, modpath = entry
    handler = spec[CALMJS_LOADERPLUGIN_REGISTRY].get(modname)
    if not handler:
        logger.warning(
            "no loaderplugin handler found for plugin entry ", modname)
        # No handler: contribute nothing to the build.
        return {}, {}, []
    return handler(self, spec, modname, source, target, modpath)
Generic loader plugin entry handler. The default implementation assumes that everything up to the first '!' symbol resolves to some known loader plugin within the registry. The registry instance responsible for the resolution of the loader plugin handlers must be available in the spec under CALMJS_LOADERPLUGIN_REGISTRY
371,174
def define_magic(self, name, func):
    """[Deprecated] Expose a function as an IPython magic.

    :param name: name under which the magic is registered
    :param func: callable implementing the magic; bound to the user
        magics object before registration
    """
    meth = types.MethodType(func, self.user_magics)
    setattr(self.user_magics, name, meth)
    # NOTE(review): the magic-kind literal (presumably 'line') was
    # stripped from this record -- restore before use.
    record_magic(self.magics, , name, meth)
[Deprecated] Expose own function as magic function for IPython. Example:: def foo_impl(self, parameter_s=''): 'My very own magic!. (Use docstrings, IPython reads them).' print 'Magic function. Passed parameter is between < >:' print '<%s>' % parameter_s print 'The self object is:', self ip.define_magic('foo',foo_impl)
371,175
def hicup_filtering_chart(self):
    """Generate the HiCUP filtering bar plot.

    :return: a MultiQC bargraph plot built from self.hicup_data
    """
    # NOTE(review): every category key/name/color literal and the config
    # id/title literals were stripped from this record -- restore before
    # use.
    keys = OrderedDict()
    keys[] = {: , : }
    keys[] = {: , : }
    keys[] = {: , : }
    keys[] = {: , : }
    keys[] = {: , : }
    keys[] = {: , : }
    keys[] = {: , : }
    config = {
        : ,
        : ,
        : ,
        : ,
        : False
    }
    return bargraph.plot(self.hicup_data, keys, config)
Generate the HiCUP filtering plot
371,176
def vhost_exists(name, runas=None):
    """Return whether the vhost exists based on rabbitmqctl list_vhosts.

    CLI Example::

        salt '*' rabbitmq.vhost_exists rabbit_host
    """
    # NOTE(review): the stray '*' below is residue of this record's
    # stripped docstring, not valid code -- remove/restore before use.
    *
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    return name in list_vhosts(runas=runas)
Return whether the vhost exists based on rabbitmqctl list_vhosts. CLI Example: .. code-block:: bash salt '*' rabbitmq.vhost_exists rabbit_host
371,177
def validate(self):
    r"""Validate user credentials against the SRP site.

    Returns
    -------
    bool
        True when login succeeds and at least one account is listed;
        False otherwise (including on any error).
    """
    # NOTE(review): the login URLs, form-field key literals and the HTML
    # attribute/selector literals were stripped from this record --
    # restore before use.
    try:
        with requests.Session() as session:
            result = session.get()
            result = session.post(
                ,
                data={: self.username, : self.password}
            )
            result_string = result.content.decode("utf-8")
            soup = BeautifulSoup(result_string, "html.parser")
            account_select = soup.find(
                , attrs={: }
            )
            # Collect the non-placeholder account options.
            accounts = []
            for option in account_select.find_all():
                if option[] != :
                    accounts.append(option[])
            valid = len(accounts) > 0
            return valid
    except Exception:
        # Deliberate best-effort: any failure means "not valid".
        return False
r"""Validate user credentials. Returns ------- bool Examples -------- Validate credentials. >>> from srpenergy.client import SrpEnergyClient >>> >>> accountid = 'your account id' >>> username = 'your username' >>> password = 'your password' >>> client = SrpEnergyClient(accountid, username, password) >>> >>> valid = client.validate() >>> print(valid) True
371,178
def clear_bucket_props(self, bucket):
    """Clear bucket properties, resetting them to their defaults.

    Returns False when the transport does not support the operation,
    True once the reset request has been sent.
    """
    if not self.pb_clear_bucket_props():
        return False
    codec = self._get_codec(riak.pb.messages.MSG_CODE_RESET_BUCKET_REQ)
    request = codec.encode_clear_bucket_props(bucket)
    self._request(request, codec)
    return True
Clear bucket properties, resetting them to their defaults
371,179
def close(self):
    """Stop the read thread, wait for it to exit cleanly, then close the
    underlying serial port."""
    self.alive = False        # signal the rx loop to stop
    self.rxThread.join()      # wait for the reader thread to finish
    self.serial.close()
Stops the read thread, waits for it to exit cleanly, then closes the underlying serial port
371,180
def fastaParseSgd(header):
    """Custom parser for fasta headers in the SGD format, see
    www.yeastgenome.org.

    :param header: str, protein entry header from a fasta file
    :returns: dict with the parsed ID, name and description
    """
    # NOTE(review): the regex pattern literal and the dict key literals
    # were stripped from this record -- restore before use.
    rePattern =
    ID, name, description = re.match(rePattern, header).groups()
    info = {: ID, : name, : description}
    return info
Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header
371,181
def import_parms(self, args):
    """Import an external parameter dict into the internal one.

    :param args: mapping of parameter names to values
    """
    for parm_name, parm_value in args.items():
        self.set_parm(parm_name, parm_value)
Import external dict to internal dict
371,182
def as_unordered(self, inplace=False):
    """Set the Categorical to be unordered.

    Parameters
    ----------
    inplace : bool, default False
        Whether or not to set the ordered attribute in-place or return
        a copy of this categorical with ordered set to False.
    """
    # Restored the stripped argument-name literal used for validation.
    inplace = validate_bool_kwarg(inplace, 'inplace')
    return self.set_ordered(False, inplace=inplace)
Set the Categorical to be unordered. Parameters ---------- inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to False.
371,183
def _cast_to_type(self, value):
    """Convert *value* to a float; report a validation failure via
    self.fail() when conversion is impossible."""
    try:
        return float(value)
    except (ValueError, TypeError):
        # NOTE(review): the failure-key literal (marshmallow-style, e.g.
        # 'invalid') was stripped from this record -- restore before use.
        self.fail(, value=value)
Convert the value to a float and raise error on failures
371,184
def resize(self, height, width, **kwargs):
    """Resize the pty of an exec'd process.

    :param height: new pty height (rows)
    :param width: new pty width (columns)
    """
    # NOTE(review): extra **kwargs are accepted but not forwarded.
    self.client.exec_resize(self.exec_id, height=height, width=width)
resize pty of an execed process
371,185
def delete(block_id):
    """Scheduling block detail resource: delete a scheduling block
    instance.

    :param block_id: ID of the scheduling block instance to remove
    :return: (response dict, HTTP status)
    """
    _url = get_root_url()
    # NOTE(review): the log/response format strings and response dict keys
    # were stripped from this record -- restore before use.
    LOG.debug(, block_id)
    try:
        DB.delete_sched_block_instance(block_id)
        response = dict(message=.format(block_id))
        response[] = {
            : .format(_url)
        }
        return response, HTTPStatus.OK
    except RuntimeError as error:
        return dict(error=str(error)), HTTPStatus.BAD_REQUEST
Scheduling block detail resource.
371,186
def has_key(self, key):
    """Case-insensitive test of whether *key* exists."""
    return self._lowerOrReturn(key) in self.data
Case insensitive test whether 'key' exists.
371,187
def isASLREnabled(self):
    """Determine whether this PE instance has the DYNAMICBASE (use
    address space layout randomization) flag enabled.

    @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx}

    @rtype: bool
    @return: C{True} when the DYNAMICBASE flag is set, C{False}
        otherwise.
    """
    aslr_flag = consts.IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
    characteristics = self.ntHeaders.optionalHeader.dllCharacteristics.value
    return characteristics & aslr_flag == aslr_flag
Determines if the current L{PE} instance has the DYNAMICBASE (Use address space layout randomization) flag enabled. @see: U{http://msdn.microsoft.com/en-us/library/bb384887.aspx} @rtype: bool @return: Returns C{True} if the current L{PE} instance has the DYNAMICBASE flag enabled. Otherwise, returns C{False}.
371,188
def _cluster_hits(hits, clusters, assigned_hit_array, cluster_hit_indices, column_cluster_distance, row_cluster_distance, frame_cluster_distance, min_hit_charge, max_hit_charge, ignore_same_hits, noisy_pixels, disabled_pixels): total_hits = hits.shape[0] if total_hits == 0: return 0 max_cluster_hits = cluster_hit_indices.shape[0] if total_hits != clusters.shape[0]: raise ValueError("hits and clusters must be the same size") if total_hits != assigned_hit_array.shape[0]: raise ValueError("hits and assigned_hit_array must be the same size") if min_hit_charge == 0: charge_correction = 1 else: charge_correction = 0 start_event_hit_index = 0 start_event_cluster_index = 0 cluster_size = 0 event_number = hits[0][] event_cluster_index = 0 for i in range(total_hits): if _new_event(hits[i][], event_number): _finish_event( hits=hits, clusters=clusters, start_event_hit_index=start_event_hit_index, stop_event_hit_index=i, start_event_cluster_index=start_event_cluster_index, stop_event_cluster_index=start_event_cluster_index + event_cluster_index) start_event_hit_index = i start_event_cluster_index = start_event_cluster_index + event_cluster_index event_number = hits[i][] event_cluster_index = 0 if assigned_hit_array[i] > 0: continue if not _hit_ok( hit=hits[i], min_hit_charge=min_hit_charge, max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[i], disabled_pixels)): _set_hit_invalid(hit=hits[i], cluster_id=-1) assigned_hit_array[i] = 1 continue _set_1d_array(cluster_hit_indices, -1, cluster_size) cluster_hit_indices[0] = i assigned_hit_array[i] = 1 cluster_size = 1 for j in cluster_hit_indices: if j < 0: break for k in range(cluster_hit_indices[0] + 1, total_hits): if _new_event(hits[k][], event_number): break if assigned_hit_array[k] > 0: continue if not _hit_ok( hit=hits[k], min_hit_charge=min_hit_charge, max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[k], disabled_pixels)): 
_set_hit_invalid(hit=hits[k], cluster_id=-1) assigned_hit_array[k] = 1 continue if _is_in_max_difference(hits[j][], hits[k][], column_cluster_distance) and _is_in_max_difference(hits[j][], hits[k][], row_cluster_distance) and _is_in_max_difference(hits[j][], hits[k][], frame_cluster_distance): if not ignore_same_hits or hits[j][] != hits[k][] or hits[j][] != hits[k][]: cluster_size += 1 if cluster_size > max_cluster_hits: raise IndexError() cluster_hit_indices[cluster_size - 1] = k assigned_hit_array[k] = 1 else: _set_hit_invalid(hit=hits[k], cluster_id=-2) assigned_hit_array[k] = 1 if cluster_size == 1 and noisy_pixels.shape[0] != 0 and _pixel_masked(hits[cluster_hit_indices[0]], noisy_pixels): _set_hit_invalid(hit=hits[cluster_hit_indices[0]], cluster_id=-1) else: _finish_cluster( hits=hits, clusters=clusters, cluster_size=cluster_size, cluster_hit_indices=cluster_hit_indices, cluster_index=start_event_cluster_index + event_cluster_index, cluster_id=event_cluster_index, charge_correction=charge_correction, noisy_pixels=noisy_pixels, disabled_pixels=disabled_pixels) event_cluster_index += 1 _finish_event( hits=hits, clusters=clusters, start_event_hit_index=start_event_hit_index, stop_event_hit_index=total_hits, start_event_cluster_index=start_event_cluster_index, stop_event_cluster_index=start_event_cluster_index + event_cluster_index) total_clusters = start_event_cluster_index + event_cluster_index return total_clusters
Main precompiled function that loopes over the hits and clusters them
371,189
def phase_by_transmission(g, window_size, copy=True): g = np.asarray(g, dtype=) g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) _opt_phase_parents_by_transmission(g.values, is_phased, window_size) return g
Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible.
371,190
def _bld_pab_generic(self, funcname, **kwargs): margs = {: pab, : kwargs} setattr(self, funcname, margs)
implements a generic version of an attribute based pandas function
371,191
def apply_noise(data, noise): if noise >= 1: noise = noise/100. for i in range(data.nRows()): ones = data.rowNonZeros(i)[0] replace_indices = numpy.random.choice(ones, size = int(len(ones)*noise), replace = False) for index in replace_indices: data[i, index] = 0 new_indices = numpy.random.choice(data.nCols(), size = int(len(ones)*noise), replace = False) for index in new_indices: while data[i, index] == 1: index = numpy.random.randint(0, data.nCols()) data[i, index] = 1
Applies noise to a sparse matrix. Noise can be an integer between 0 and 100, indicating the percentage of ones in the original input to move, or a float in [0, 1), indicating the same thing. The input matrix is modified in-place, and nothing is returned. This operation does not affect the sparsity of the matrix, or of any individual datapoint.
371,192
def groups_dynamic(self): return sa.orm.relationship( "Group", secondary="users_groups", lazy="dynamic", passive_deletes=True, passive_updates=True, )
returns dynamic relationship for groups - allowing for filtering of data
371,193
def _unpack_tableswitch(bc, offset): jump = (offset % 4) if jump: offset += (4 - jump) (default, low, high), offset = _unpack(_struct_iii, bc, offset) joffs = list() for _index in range((high - low) + 1): j, offset = _unpack(_struct_i, bc, offset) joffs.append(j) return (default, low, high, joffs), offset
function for unpacking the tableswitch op arguments
371,194
def __search_iterable(self, obj, item, parent="root", parents_ids=frozenset({})): for i, thing in enumerate(obj): new_parent = "%s[%s]" % (parent, i) if self.__skip_this(thing, parent=new_parent): continue if self.case_sensitive or not isinstance(thing, strings): thing_cased = thing else: thing_cased = thing.lower() if thing_cased == item: self.__report( report_key=, key=new_parent, value=thing) else: item_id = id(thing) if parents_ids and item_id in parents_ids: continue parents_ids_added = add_to_frozen_set(parents_ids, item_id) self.__search(thing, item, "%s[%s]" % (parent, i), parents_ids_added)
Search iterables except dictionaries, sets and strings.
371,195
def identify_sep(filepath): ext = os.path.splitext(filepath)[1].lower() allowed_exts = [, , ] assert ext in [, ], "Unexpected file extension {}. \ Supported extensions {}\n filename: {}".format( ext, allowed_exts, os.path.basename(filepath)) maybe_seps = [, , , , ] with open(filepath,) as fp: header = fp.__next__() count_seps_header = {sep: _count(sep, header) for sep in maybe_seps} count_seps_header = {sep: count for sep, count in count_seps_header.items() if count > 0} if count_seps_header: return max(count_seps_header.__iter__(), key=(lambda key: count_seps_header[key])) else: raise Exception("Couldns the information:\n HEADER: {}\n SEPS SEARCHED: {}".format(header, maybe_seps))
Identifies the separator of data in a filepath. It reads the first line of the file and counts supported separators. Currently supported separators: ['|', ';', ',','\t',':']
371,196
def Nu_x(self, L, theta, Ts, **statef): Tf = statef[] thetar = radians(theta) if self._isgas: self.Tr = Ts - 0.38 * (Ts - Tf) beta = self._fluid.beta(T=Tf) else: self.Tr = Ts - 0.5 * (Ts - Tf) beta = self._fluid.beta(T=self.Tr) if Ts > Tf: if 0.0 < theta < 45.0: g = const.g*cos(thetar) else: g = const.g else: if -45.0 < theta < 0.0: g = const.g*cos(thetar) else: g = const.g nu = self._fluid.nu(T=self.Tr) alpha = self._fluid.alpha(T=self.Tr) Gr = dq.Gr(L, Ts, Tf, beta, nu, g) Pr = dq.Pr(nu, alpha) Ra = Gr * Pr eq = [self.equation_dict[r] for r in self.regions if r.contains_point(theta, Ra)][0] return eq(self, Ra, Pr)
Calculate the local Nusselt number. :param L: [m] characteristic length of the heat transfer surface :param theta: [°] angle of the surface with the vertical :param Ts: [K] heat transfer surface temperature :param Tf: [K] bulk fluid temperature :returns: float
371,197
def get_freq_map_and_normalizations(self, frequency_list, upper_freq_formula): self.frequency_map = {} self.normalization_map = {} self.upper_freq_formula = upper_freq_formula frequency_list.sort() for idx, frequency in enumerate(frequency_list): self.frequency_map[frequency] = idx self.normalization_map[frequency] = \ (self.metric_params.moments[][frequency])**0.5
If using the --vary-fupper capability we need to store the mapping between index and frequencies in the list. We also precalculate the normalization factor at every frequency, which is used when estimating overlaps to account for abrupt changes in termination frequency. Parameters ----------- frequency_list : array of floats The frequencies for which the metric has been computed and lie within the parameter space being considered. upper_freq_formula : string
371,198
def _oauth_request_parameters(self, url, access_token, parameters={}, method="GET"): consumer_token = self._oauth_consumer_token() base_args = dict( oauth_consumer_key=consumer_token["key"], oauth_token=access_token["key"], oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"), ) args = {} args.update(base_args) args.update(parameters) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": signature = _oauth10a_signature(consumer_token, method, url, args, access_token) else: signature = _oauth_signature(consumer_token, method, url, args, access_token) base_args["oauth_signature"] = signature return base_args
Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments that will be sent with the request.
371,199
def unicode_to_bytes(s, encoding=, errors=): return s if isinstance(s, str) else s.encode(encoding, errors)
Helper to convert unicode strings to bytes for data that needs to be written to on output stream (i.e. terminal) For Python 3 this should be called str_to_bytes :param str s: string to encode :param str encoding: utf-8 by default :param str errors: what to do when encoding fails :return: byte string utf-8 encoded