index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
|---|---|---|---|---|---|
708,621
|
more_executors._impl.futures.bool
|
f_or
|
Boolean ``OR`` over a number of futures.
Signature: :code:`Future<A>[, Future<B>[, ...]] ⟶ Future<A|B|...>`
Arguments:
f (~concurrent.futures.Future)
Any future
fs (~concurrent.futures.Future)
Any futures
Returns:
:class:`~concurrent.futures.Future`
A future resolved from the inputs using ``OR`` semantics:
- Resolved with the earliest true value returned by an input
future, if any.
- Otherwise, resolved with the latest false value or exception
returned by the input futures.
.. note::
This function is tested with up to 100,000 input futures.
Exceeding this limit may result in performance issues.
.. versionadded:: 1.19.0
|
# -*- coding: utf-8 -*-
from threading import Lock
import logging
from concurrent.futures import Future
from .base import chain_cancel, weak_callback
from ..common import copy_future_exception, try_set_result
from .check import ensure_futures
from ..logwrap import LogWrapper
from ..metrics import track_future
LOG = LogWrapper(logging.getLogger("more_executors.futures"))
class BoolOperation(object):
    """Base class for boolean operations (e.g. AND/OR) over futures.

    Tracks a set of input futures and resolves a single output future
    (``self.out``) based on subclass-defined rules.
    """

    def __init__(self, fs):
        # Not-yet-handled input futures; a dict is used as an ordered
        # set (values are always True and never read).
        self.fs = {}
        for f in fs:
            self.fs[f] = True
        # Guarded by self.lock; subclasses use this to mark the output
        # as resolved so late completions are ignored.
        self.done = False
        self.lock = Lock()
        # The single output future resolved from the inputs.
        self.out = Future()
        for f in fs:
            # Cancelling the output propagates cancellation to inputs.
            chain_cancel(self.out, f)
            # weak_callback avoids the input futures' callback lists
            # keeping this operation object alive.
            f.add_done_callback(weak_callback(self.handle_done))

    def get_state_update(self, f):
        # Subclass hook: given a completed input future, return
        # (set_result, set_exception, futures_to_cancel).
        raise NotImplementedError()  # pragma: no cover

    def handle_done(self, f):
        # Invoked (possibly from multiple threads) as inputs complete.
        set_result = False
        set_exception = False
        cancel_futures = set()
        with self.lock:
            if self.done:
                # Output already resolved; ignore this completion.
                return
            del self.fs[f]
            (set_result, set_exception, cancel_futures) = self.get_state_update(f)
        # Resolution and cancellation happen outside the lock so that
        # callbacks they trigger cannot deadlock against it.
        if set_result:
            try_set_result(self.out, f.result())
        if set_exception:
            copy_future_exception(f, self.out)
        for to_cancel in cancel_futures:
            to_cancel.cancel()
|
(f, *fs)
|
708,622
|
more_executors._impl.futures.proxy
|
f_proxy
|
Proxy calls on a future through to the future's result.
The value returned by this function is a future resolved with the
same result (or exception) as the input future ``f``. It will also
proxy most attribute lookups and method calls through to the
underlying result, awaiting the result when needed.
Note that since the returned value is intended to remain usable as
a ``Future``, this proxy is relatively conservative and avoids
proxying functionality which would clash with the ``Future``
interface.
Functionality which is not proxied includes:
- conversion to boolean (``__bool__``)
- conversion to string (``__str__``, ``__repr__``)
- methods relating to object identity (``__eq__``, ``__hash__``)
Signature: :code:`Future<X> ⟶ Future<X>`
Arguments:
f (~concurrent.futures.Future)
Any future.
timeout (float)
Timeout applied when awaiting the future's result
during proxied calls.
Returns:
:class:`~concurrent.futures.Future`
a Future which proxies calls through to the
future's result as needed.
.. versionadded:: 2.3.0
|
def __len__(self):
    # Proxy len() through to the underlying future's result.
    # NOTE(review): self.__result is name-mangled; presumably it awaits
    # and returns the wrapped future's value — confirm in the class body.
    return len(self.__result)
|
(f, **kwargs)
|
708,623
|
more_executors._impl.futures.base
|
f_return
|
Return a future which provides the value `x`.
Signature: :code:`A ⟶ Future<A>`
Arguments:
x
A value to be returned
Returns:
:class:`~concurrent.futures.Future` of :obj:`x`
A future immediately resolved with the value :obj:`x`.
.. versionadded:: 1.19.0
.. versionchanged:: 2.1.0
value now defaults to :code:`None`
|
def f_return(x=None):
    """Return a future already resolved with the value `x`.

    Signature: :code:`A ⟶ Future<A>`

    Arguments:
        x
            A value to be returned

    Returns:
        :class:`~concurrent.futures.Future` of :obj:`x`
            A future immediately resolved with the value :obj:`x`.

    .. versionadded:: 1.19.0

    .. versionchanged:: 2.1.0
        value now defaults to :code:`None`
    """
    out = Future()
    # Register for metrics before resolving, mirroring executor futures.
    track_future(out, type="return", executor="none")
    out.set_result(x)
    return out
|
(x=None)
|
708,624
|
more_executors._impl.futures.base
|
f_return_cancelled
|
Returns:
:class:`~concurrent.futures.Future`
A future which is cancelled.
.. versionadded:: 1.19.0
|
def f_return_cancelled():
    """
    Returns:
        :class:`~concurrent.futures.Future`
            A future which is cancelled.

    .. versionadded:: 1.19.0
    """
    out = Future()
    track_future(out, type="return_cancelled", executor="none")
    # Cancel while still pending, then complete the pending->cancelled
    # transition so the cancellation becomes final.
    out.cancel()
    out.set_running_or_notify_cancel()
    return out
|
()
|
708,625
|
more_executors._impl.futures.base
|
f_return_error
|
Return a future which returns/raises the exception `x`.
Arguments:
x (Exception)
An exception to be returned/raised.
traceback (traceback)
An optional traceback associated with the exception.
This argument should only be provided on Python 2.x.
It will be used in the value returned from
:meth:`concurrent.futures.Future.exception_info`,
which only exists on Python 2.
Returns:
:class:`~concurrent.futures.Future` of :obj:`x`
A future immediately resolved with the exception :obj:`x`.
.. versionadded:: 1.19.0
|
def f_return_error(x, traceback=None):
    """Return a future which returns/raises the exception `x`.

    Arguments:
        x (Exception)
            An exception to be returned/raised.
        traceback (traceback)
            An optional traceback associated with the exception.

            This argument should only be provided on Python 2.x.
            It will be used in the value returned from
            :meth:`concurrent.futures.Future.exception_info`,
            which only exists on Python 2.

    Returns:
        :class:`~concurrent.futures.Future` of :obj:`x`
            A future immediately resolved with the exception :obj:`x`.

    .. versionadded:: 1.19.0
    """
    out = Future()
    track_future(out, type="return_error", executor="none")
    # Store the exception (and optional traceback) on the new future.
    copy_exception(out, x, traceback)
    return out
|
(x, traceback=None)
|
708,626
|
more_executors._impl.futures.sequence
|
f_sequence
|
Transform a list of futures into a future containing a list.
Signature: :code:`list<Future<X>> ⟶ Future<list<X>>`
Arguments:
futures (iterable of :class:`~concurrent.futures.Future`)
A list or other iterable of futures.
Returns:
:class:`~concurrent.futures.Future` of :class:`list`
A future resolved with either:
- a list holding the output value of each input future
- or an exception, if any input future raised an exception
.. note::
This function is tested with up to 100,000 input futures.
Exceeding this limit may result in performance issues.
.. versionadded:: 1.19.0
|
def f_sequence(futures):
    """Transform a list of futures into a future containing a list.

    Signature: :code:`list<Future<X>> ⟶ Future<list<X>>`

    Arguments:
        futures (iterable of :class:`~concurrent.futures.Future`)
            A list or other iterable of futures.

    Returns:
        :class:`~concurrent.futures.Future` of :class:`list`
            A future resolved with either:

            - a list holding the output value of each input future
            - or an exception, if any input future raised an exception

    .. note::
        This function is tested with up to 100,000 input futures.
        Exceeding this limit may result in performance issues.

    .. versionadded:: 1.19.0
    """
    # Sequencing is traversal with the identity function.
    def identity(value):
        return value

    traversed = f_traverse(identity, futures)
    return track_future(traversed, type="sequence")
|
(futures)
|
708,627
|
more_executors._impl.futures.timeout
|
f_timeout
|
Wrap a future to cancel it after a timeout is reached.
Signature: :code:`Future<X>, float ⟶ Future<X>`
Arguments:
future (~concurrent.futures.Future)
Any future.
timeout (float)
A timeout to apply to the future, in seconds.
Returns:
:class:`~concurrent.futures.Future`
A wrapped version of :obj:`future` which may be cancelled if the
future has not completed within :obj:`timeout` seconds.
Note: only a single attempt is made to cancel the future, and there
is no guarantee that the cancel will succeed.
.. versionadded:: 1.19.0
|
def f_timeout(future, timeout):
    """Wrap a future to cancel it after a timeout is reached.

    Signature: :code:`Future<X>, float ⟶ Future<X>`

    Arguments:
        future (~concurrent.futures.Future)
            Any future.
        timeout (float)
            A timeout to apply to the future, in seconds.

    Returns:
        :class:`~concurrent.futures.Future`
            A wrapped version of :obj:`future` which may be cancelled if the
            future has not completed within :obj:`timeout` seconds.

    Note: only a single attempt is made to cancel the future, and there
    is no guarantee that the cancel will succeed.

    .. versionadded:: 1.19.0
    """
    # submit_timeout expects a callable producing the future to guard.
    def provide_future():
        return future

    return timeout_executor().submit_timeout(timeout, provide_future)
|
(future, timeout)
|
708,628
|
more_executors._impl.futures.sequence
|
f_traverse
|
Traverse over an iterable calling a future-returning function,
and return a future holding the returned values as a list.
Signature: :code:`fn<A⟶Future<B>>, iterable<A> ⟶ Future<list<B>>`
Arguments:
fn (callable)
A unary function returning a future.
xs (iterable)
An iterable to be traversed.
Returns:
:class:`~concurrent.futures.Future` of :class:`list`
A future resolved with either:
- a list holding the resolved output values of :obj:`fn`
- or an exception, if :obj:`fn` or any future produced by :obj:`fn` failed
.. versionadded:: 1.19.0
|
def f_traverse(fn, xs):
    """Traverse over an iterable calling a future-returning function,
    and return a future holding the returned values as a list.

    Signature: :code:`fn<A⟶Future<B>>, iterable<A> ⟶ Future<list<B>>`

    Arguments:
        fn (callable)
            A unary function returning a future.
        xs (iterable)
            An iterable to be traversed.

    Returns:
        :class:`~concurrent.futures.Future` of :class:`list`
            A future resolved with either:

            - a list holding the resolved output values of :obj:`fn`
            - or an exception, if :obj:`fn` or any future produced by :obj:`fn` failed

    .. versionadded:: 1.19.0
    """
    try:
        produced = [fn(item) for item in xs]
    except Exception:
        # fn raised synchronously: surface that as a failed future.
        failed = Future()
        copy_exception(failed)
        return failed
    # Zip the produced futures into one tuple-future, then listify it.
    zipped = f_zip(*produced)
    return track_future(f_map(zipped, list), type="traverse")
|
(fn, xs)
|
708,629
|
more_executors._impl.futures.zip
|
f_zip
|
Create a new future holding the return values of any number of input futures.
Signature: :code:`Future<A>[, Future<B>[, ...]] ⟶ Future<A[, B[, ...]]>`
Arguments:
fs (~concurrent.futures.Future)
Any number of futures.
Returns:
:class:`~concurrent.futures.Future` of :class:`tuple`
A future holding the returned values of all input futures as a tuple.
The returned tuple has the same length and order as the input futures.
Alternatively, a future raising an exception or a cancelled future,
if any input futures raised an exception or was cancelled.
.. note::
This function is tested with up to 100,000 input futures.
Exceeding this limit may result in performance issues.
.. versionadded:: 1.19.0
|
# -*- coding: utf-8 -*-
from concurrent.futures import Future
from threading import Lock
from functools import partial
from collections import namedtuple
from more_executors._impl.common import (
copy_future_exception,
try_set_result,
)
from .base import f_return, chain_cancel, weak_callback
from .check import ensure_futures
from ..metrics import track_future
# For small-ish tuples, we make f_zip return namedtuple instances
# which can be traced back to here rather than bare tuples. The point
# is to improve debuggability of code where the future returned by
# f_zip ends up being logged with repr(), as in that case only the
# result's type will be logged. Seeing e.g. 'ZipTuple3' in a crash log
# rather than 'tuple' could be potentially very helpful.
# One ZipTupleN namedtuple class per arity 0..19, with fields f0..f(N-1).
TUPLE_CLASSES = [
    namedtuple("ZipTuple%s" % size, ["f%s" % field for field in range(size)])
    for size in range(20)
]


def maketuple(value):
    """Wrap *value* in the ZipTupleN class matching its length, falling
    back to a plain tuple when the sequence is too long."""
    try:
        cls = TUPLE_CLASSES[len(value)]
    except IndexError:
        return tuple(value)
    return cls(*value)
|
(*fs)
|
708,638
|
cyvcf2.cyvcf2
|
VCF
|
VCF(fname, mode=u'r', gts012=False, lazy=False, strict_gt=False, samples=None, threads=None)
VCF class holds methods to iterate over and query a VCF.
Parameters
----------
fname: str
path to file
gts012: bool
if True, then gt_types will be 0=HOM_REF, 1=HET, 2=HOM_ALT, 3=UNKNOWN. If False, 3, 2 are flipped.
lazy: bool
if True, then don't unpack (parse) the underlying record until needed.
strict_gt: bool
if True, then any '.' present in a genotype will classify the corresponding element in the gt_types array as UNKNOWN.
samples: list
list of samples to extract from full set in file.
threads: int
the number of threads to use including this reader.
Returns
-------
VCF object for iterating and querying.
|
from cyvcf2.cyvcf2 import VCF
| null |
708,641
|
cyvcf2.cyvcf2
|
Variant
|
Variant(*args, **kwargs)
Variant represents a single VCF Record.
It is created internally by iterating over a VCF.
Attributes
----------
INFO: `INFO`
a dictionary-like field that provides access to the VCF INFO field.
POS: the 1-based variant start.
|
from cyvcf2.cyvcf2 import Variant
| null |
708,642
|
cyvcf2.cyvcf2
|
Writer
|
Writer(fname, VCF tmpl, mode=None)
Writer class makes a VCF Writer.
Parameters
----------
fname: str
path to file
tmpl: VCF
a template to use to create the output header.
mode: str
| Mode to use for writing the file. If ``None`` (default) is given, the mode is
inferred from the filename extension. If stdout (``"-"``) is provided for ``fname``
and ``mode`` is left at default, uncompressed VCF will be produced.
| Valid values are:
| - ``"wbu"``: uncompressed BCF
| - ``"wb"``: compressed BCF
| - ``"wz"``: compressed VCF
| - ``"w"``: uncompressed VCF
| Compression level can also be indicated by adding a single integer to one of
the compressed modes (e.g. ``"wz4"`` for VCF with compressions level 4).
Note
----
File extensions ``.bcf`` and ``.bcf.gz`` will both return compressed BCF. If you
want uncompressed BCF you must explicitly provide the appropriate ``mode``.
Returns
-------
VCF object for iterating and querying.
|
from cyvcf2.cyvcf2 import Writer
| null |
708,644
|
builtins
|
Jieba
| null |
from builtins import Jieba
| null |
708,646
|
wom_connector.instrument
|
Instrument
| null |
class Instrument:
    """A WOM Instrument able to generate vouchers via the Registry API."""

    def __init__(self, domain: str, instrument_id: int, instrument_privk: bytes, instrument_privk_password: str = None):
        """
        Create an instance of a WOM Instrument able to generate vouchers.

        Keys should be loaded using:
            privk = open("keys/instrument1.pem", "rb")
        and provided as:
            privk.read()

        :rtype: Instrument
        :param domain: str: Domain of the Registry (e.g., wom.social).
        :param instrument_id: int: Unique instrument ID, assigned by the WOM platform.
        :param instrument_privk: bytes: Instrument private key in bytes format.
        :param instrument_privk_password: Optional password for the Instrument private key (Default value = None).
        """
        # Proxy used for all remote Registry calls.
        self.__registry_proxy = RegistryProxy(domain)
        self.ID = instrument_id
        # Parse and validate the RSA key eagerly so a bad key fails fast.
        self.__instrument_privk = self.__load_private_key(
            instrument_privk, instrument_privk_password, "Instrument Private Key")
        self.__logger = WOMLogger("Instrument")

    @staticmethod
    def __load_private_key(private_key_str, password, tag):
        """Parse a PEM private key, ensuring it is an RSA private key."""
        private_key = load_pem_private_key(private_key_str, password, default_backend())
        if not isinstance(private_key, RSAPrivateKey):
            # BUG FIX: original used '"{0} ..." % tag', which raises
            # TypeError("not all arguments converted during string
            # formatting") and loses the intended message. The template
            # uses str.format placeholders, so call .format(tag).
            raise TypeError("{0} is not a private RSA key".format(tag))
        return private_key

    @staticmethod
    def __generate_nonce(nonce=None):
        # Reuse the caller's nonce when given, else mint one from a
        # random UUID, base64-encoded to ASCII text.
        return base64.b64encode(uuid.uuid4().bytes).decode('utf-8') if nonce is None else nonce

    def request_vouchers(self, vouchers: list, nonce: str = None, password: str = None) -> (str, str):
        """
        Obtains vouchers from the WOM Registry through the Registry remote API.

        It returns the OTC representing the vouchers and the associated password.

        :param vouchers: list: A list of Voucher items or a list of properly structured dictionaries
        :param nonce: str: Communication nonce. If None, a proper nonce will be generated. (Default value = None)
        :param password: str: Voucher OTC password. If None, a secure password will be generated by the Registry. (Default value = None)
        :returns: (OTC, password): (str, str)
        :rtype: tuple(str, str)
        """
        # call to voucher/create API
        response_data = self.__voucher_create(vouchers, nonce, password)
        # call to voucher/verify API
        self.__voucher_verify(response_data['otc'])
        return response_data['otc'], response_data['password']

    def __voucher_create(self, vouchers, nonce=None, password=None):
        """Call the voucher/create API and return its decoded response."""
        # check arguments
        if vouchers is None \
                or not isinstance(vouchers, list) \
                or len(vouchers) == 0:
            raise ValueError("Voucher list is not valid or empty")
        if not isinstance(vouchers[0], Voucher) \
                and not isinstance(vouchers[0], dict):
            raise ValueError("Vouchers has to be instances of Voucher or dictionaries")
        # generate a valid nonce if there is no one
        effective_nonce = self.__generate_nonce(nonce)
        payload = json.dumps({'sourceId': self.ID,
                              'nonce': effective_nonce,
                              'password': password,
                              'vouchers': vouchers},
                             cls=VoucherEncoder if isinstance(vouchers[0], Voucher) else None)
        self.__logger.debug(payload)
        # encrypt inner payload with the Registry's public key
        vouchers_create_payload = Crypto.encrypt(payload,
                                                 public_key=self.__registry_proxy.PublicKey)
        # make registry request
        json_response = self.__registry_proxy.voucher_create(source_id=self.ID,
                                                             nonce=effective_nonce,
                                                             payload=vouchers_create_payload.decode('utf-8'))
        # decode registry response with our private key
        response = Crypto.decrypt(json_response['payload'], private_key=self.__instrument_privk)
        return json.loads(response.decode('utf-8'))

    def __voucher_verify(self, otc):
        # Confirm voucher generation by echoing the OTC back, encrypted.
        encrypted_otc = Crypto.encrypt(json.dumps({'otc': otc}), public_key=self.__registry_proxy.PublicKey)
        self.__registry_proxy.voucher_verify(encrypted_otc.decode('utf-8'))
|
(domain: str, instrument_id: int, instrument_privk: bytes, instrument_privk_password: str = None)
|
708,647
|
wom_connector.instrument
|
__generate_nonce
| null |
@staticmethod
def __generate_nonce(nonce=None):
    # Reuse the provided nonce when present; otherwise mint a fresh one
    # from a random UUID, base64-encoded to ASCII text.
    if nonce is not None:
        return nonce
    return base64.b64encode(uuid.uuid4().bytes).decode('utf-8')
|
(nonce=None)
|
708,648
|
wom_connector.instrument
|
__load_private_key
| null |
@staticmethod
def __load_private_key(private_key_str, password, tag):
    """Parse a PEM private key and ensure it is an RSA private key.

    :param private_key_str: PEM-encoded private key bytes.
    :param password: optional key password, or None.
    :param tag: human-readable key name used in error messages.
    :raises TypeError: if the parsed key is not an RSA private key.
    """
    private_key = load_pem_private_key(private_key_str, password, default_backend())
    if not isinstance(private_key, RSAPrivateKey):
        # BUG FIX: original used '"{0} ..." % tag', which raises
        # TypeError("not all arguments converted during string
        # formatting") instead of the intended message; the template
        # uses str.format placeholders, so call .format(tag).
        raise TypeError("{0} is not a private RSA key".format(tag))
    return private_key
|
(private_key_str, password, tag)
|
708,649
|
wom_connector.instrument
|
__voucher_create
| null |
def __voucher_create(self, vouchers, nonce=None, password=None):
    """Call the voucher/create Registry API and return its decoded response."""
    # Guard clauses: require a non-empty list of Vouchers or dicts
    # before touching the network. (isinstance(None, list) is False,
    # so a None argument is rejected by the first check.)
    if not isinstance(vouchers, list) or not vouchers:
        raise ValueError("Voucher list is not valid or empty")
    first = vouchers[0]
    if not isinstance(first, (Voucher, dict)):
        raise ValueError("Vouchers has to be instances of Voucher or dictionaries")
    # Fall back to a freshly generated nonce when none was supplied.
    effective_nonce = self.__generate_nonce(nonce)
    encoder = VoucherEncoder if isinstance(first, Voucher) else None
    payload = json.dumps(
        {
            'sourceId': self.ID,
            'nonce': effective_nonce,
            'password': password,
            'vouchers': vouchers,
        },
        cls=encoder,
    )
    self.__logger.debug(payload)
    # Encrypt the inner payload with the Registry's public key.
    sealed_payload = Crypto.encrypt(payload, public_key=self.__registry_proxy.PublicKey)
    # Perform the remote voucher/create call.
    json_response = self.__registry_proxy.voucher_create(
        source_id=self.ID,
        nonce=effective_nonce,
        payload=sealed_payload.decode('utf-8'),
    )
    # Decrypt the Registry's answer with our private key and parse it.
    plaintext = Crypto.decrypt(json_response['payload'], private_key=self.__instrument_privk)
    return json.loads(plaintext.decode('utf-8'))
|
(self, vouchers, nonce=None, password=None)
|
708,650
|
wom_connector.instrument
|
__voucher_verify
| null |
def __voucher_verify(self, otc):
    # Echo the OTC back to the Registry, encrypted with its public key,
    # to confirm the voucher creation.
    body = json.dumps({'otc': otc})
    sealed = Crypto.encrypt(body, public_key=self.__registry_proxy.PublicKey)
    self.__registry_proxy.voucher_verify(sealed.decode('utf-8'))
|
(self, otc)
|
708,651
|
wom_connector.instrument
|
__init__
|
Create an instance of a WOM Instrument able to generate vouchers.
Keys should be loaded using:
privk = open("keys/instrument1.pem", "rb")
and provided as:
privk.read()
:rtype: Instrument
:param domain: str: Domain of the Registry (e.g., wom.social).
:param instrument_id: int: Unique instrument ID, assigned by the WOM platform.
:param instrument_privk: bytes: Instrument private key in bytes format.
:param instrument_privk_password: bytes: Optional password for Instrument private key (Default value = None).
|
def __init__(self, domain: str, instrument_id: int, instrument_privk: bytes, instrument_privk_password: str=None):
    """
    Create an instance of a WOM Instrument able to generate vouchers.

    Keys should be loaded using:
        privk = open("keys/instrument1.pem", "rb")
    and provided as:
        privk.read()

    :rtype: Instrument
    :param domain: str: Domain of the Registry (e.g., wom.social).
    :param instrument_id: int: Unique instrument ID, assigned by the WOM platform.
    :param instrument_privk: bytes: Instrument private key in bytes format.
    :param instrument_privk_password: Optional password for the Instrument
        private key; annotated ``str`` but originally documented as bytes —
        TODO confirm which type the key loader expects. (Default value = None)
    """
    # Proxy used for all remote Registry calls.
    self.__registry_proxy = RegistryProxy(domain)
    self.ID = instrument_id
    # Parse and validate the RSA private key up front so a bad key
    # fails fast at construction time.
    self.__instrument_privk = self.__load_private_key(instrument_privk, instrument_privk_password, "Instrument Private Key")
    self.__logger = WOMLogger("Instrument")
|
(self, domain: str, instrument_id: int, instrument_privk: bytes, instrument_privk_password: Optional[str] = None)
|
708,652
|
wom_connector.instrument
|
request_vouchers
|
Obtains vouchers from WOM Registry through Registry remote API.
It returns a list containing the OTC representing the vouchers and the associated password.
:param vouchers: list: A list of Voucher items or a list of properly structured dictionaries
:param nonce: str: Communication nonce. If None, a proper nonce will be generated. (Default value = None)
:param password: str: Voucher OTC password. If None, a secure password will be generated by the Registry. (Default value = None)
:returns: (OTC, password): (str, str)
:rtype: list(str, str)
|
def request_vouchers(self, vouchers: list, nonce: str = None, password: str = None) -> (str, str):
    """
    Obtains vouchers from the WOM Registry through the Registry remote API.

    It returns the OTC representing the vouchers and the associated password.

    :param vouchers: list: A list of Voucher items or a list of properly structured dictionaries
    :param nonce: str: Communication nonce. If None, a proper nonce will be generated. (Default value = None)
    :param password: str: Voucher OTC password. If None, a secure password will be generated by the Registry. (Default value = None)
    :returns: (OTC, password): (str, str)
    :rtype: tuple(str, str)
    """
    # call to voucher/create API
    response_data = self.__voucher_create(vouchers, nonce, password)
    # call to voucher/verify API confirms receipt of the OTC
    self.__voucher_verify(response_data['otc'])
    return response_data['otc'], response_data['password']
|
(self, vouchers: list, nonce: Optional[str] = None, password: Optional[str] = None) -> (<class 'str'>, <class 'str'>)
|
708,653
|
wom_connector.pointofsale
|
POS
|
Create an instance of a WOM PointOfSale able to generate payment requests.
Keys should be loaded using:
privk = open("keys/pos1.pem", "rb")
and provided as:
privk.read()
:rtype: POS
:param domain: str: Domain of the Registry (e.g., wom.social).
:param pos_id: str: Unique instrument ID, assigned by the WOM platform.
:param pos_privk: bytes: POS private key in bytes format.
:param pos_privk_password: bytes: Optional password for POS private key (Default value = None)
|
class POS:
    """
    Create an instance of a WOM Point of Sale able to generate payment requests.

    Keys should be loaded using:
        privk = open("keys/pos1.pem", "rb")
    and provided as:
        privk.read()

    :rtype: POS
    :param domain: str: Domain of the Registry (e.g., wom.social).
    :param pos_id: str: Unique POS ID, assigned by the WOM platform.
    :param pos_privk: bytes: POS private key in bytes format.
    :param pos_privk_password: Optional password for the POS private key (Default value = None)
    """

    def __init__(self, domain: str, pos_id: str, pos_privk: bytes, pos_privk_password: str = None):
        # Proxy used for all remote Registry calls.
        self.__registry_proxy = RegistryProxy(domain)
        self.ID = pos_id
        # Parse and validate the RSA key eagerly so a bad key fails fast.
        self.__pos_privk = self.__load_private_key(pos_privk, pos_privk_password,
                                                   "POS Private Key")
        self.__logger = WOMLogger("POS")

    @classmethod
    def __load_private_key(cls, private_key_str, password, tag):
        """Parse a PEM private key, ensuring it is an RSA private key."""
        private_key = load_pem_private_key(private_key_str, password, default_backend())
        if not isinstance(private_key, RSAPrivateKey):
            # BUG FIX: original used '"{0} ..." % tag', which raises a
            # formatting TypeError instead of the intended message; the
            # template uses str.format placeholders, so call .format(tag).
            raise TypeError("{0} is not a private RSA key".format(tag))
        return private_key

    @staticmethod
    def __generate_nonce(nonce=None):
        # Reuse the caller's nonce when given, else mint one from a
        # random UUID, base64-encoded to ASCII text.
        return base64.b64encode(uuid.uuid4().bytes).decode('utf-8') if nonce is None else nonce

    def request_payment(self, amount: int,
                        pocket_ack_url: str,
                        filter: Filter = None,
                        pos_ack_url: str = None,
                        persistent: bool = False,
                        nonce=None,
                        password=None) -> (str, str):
        """
        Obtains payment requests from the WOM Registry through the Registry remote API.

        It returns the OTC representing the payment request and the associated password.

        :param amount: int: the number of vouchers needed as a payment
        :param pocket_ack_url: str: the URL to be called for voucher spending notification, on pocket side
        :param filter: Filter: filter to be applied on vouchers to be valid for the payment (Default value = None)
        :param pos_ack_url: str: the URL to be called for voucher spending notification, on pos side (Default value = None)
        :param persistent: bool: is this payment persistent? (Default value = False)
        :param nonce: str: Communication nonce. If None, a proper nonce will be generated. (Default value = None)
        :param password: str: Payment OTC password. If None, a secure password will be generated by the Registry. (Default value = None)
        :returns: (OTC, password): (str, str)
        :rtype: tuple(str, str)
        """
        # call to payment/register API
        response_data = self.__payment_register(amount, pocket_ack_url, filter, pos_ack_url, persistent, nonce, password)
        # call to payment/verify API
        self.__payment_verify(response_data['otc'])
        return response_data['otc'], response_data['password']

    def __payment_register(self, amount,
                           pocket_ack_url: str,
                           filter: Filter = None,
                           pos_ack_url: str = None,
                           persistent: bool = False,
                           nonce=None,
                           password=None):
        """Call the payment/register API and return its decoded response."""
        # check arguments
        if amount < 1:
            raise ValueError("Amount has to be a positive, non-zero, integer")
        if len(pocket_ack_url) < 5:
            raise ValueError("PocketAckUrl has to be a valid URL")
        if pos_ack_url is not None and len(pos_ack_url) < 5:
            raise ValueError("PosAckUrl has to be a valid URL or None")
        # generate a valid nonce if there is no one
        effective_nonce = self.__generate_nonce(nonce)
        payload = json.dumps({'posId': self.ID,
                              'nonce': effective_nonce,
                              'password': password,
                              'amount': amount,
                              'simpleFilter': filter,
                              'pocketAckUrl': pocket_ack_url,
                              'posAckUrl': pos_ack_url,
                              'persistent': persistent
                              }, cls=FilterEncoder if filter is not None else None)
        self.__logger.debug(payload)
        # encrypt inner payload with the Registry's public key
        payment_register_payload = Crypto.encrypt(payload,
                                                  public_key=self.__registry_proxy.PublicKey)
        # make registry request
        json_response = self.__registry_proxy.payment_register(source_id=self.ID,
                                                               nonce=effective_nonce,
                                                               payload=payment_register_payload.decode('utf-8'))
        # decode registry response with our private key
        response = Crypto.decrypt(json_response['payload'], private_key=self.__pos_privk)
        return json.loads(response.decode('utf-8'))

    def __payment_verify(self, otc):
        # Confirm the payment registration by echoing the OTC, encrypted.
        encrypted_otc = Crypto.encrypt(json.dumps({'otc': otc}), public_key=self.__registry_proxy.PublicKey)
        self.__registry_proxy.payment_verify(encrypted_otc.decode('utf-8'))
|
(domain: str, pos_id: str, pos_privk: bytes, pos_privk_password: str = None)
|
708,655
|
wom_connector.pointofsale
|
__payment_register
| null |
def __payment_register(self, amount,
                       pocket_ack_url: str,
                       filter: Filter = None,
                       pos_ack_url: str = None,
                       persistent: bool = False,
                       nonce=None,
                       password=None):
    """Call the payment/register Registry API and return its decoded response."""
    # Guard clauses: validate arguments before the network round-trip.
    if amount < 1:
        raise ValueError("Amount has to be a positive, non-zero, integer")
    if len(pocket_ack_url) < 5:
        raise ValueError("PocketAckUrl has to be a valid URL")
    if pos_ack_url is not None and len(pos_ack_url) < 5:
        raise ValueError("PosAckUrl has to be a valid URL or None")
    # Fall back to a freshly generated nonce when none was supplied.
    effective_nonce = self.__generate_nonce(nonce)
    encoder = FilterEncoder if filter is not None else None
    payload = json.dumps(
        {
            'posId': self.ID,
            'nonce': effective_nonce,
            'password': password,
            'amount': amount,
            'simpleFilter': filter,
            'pocketAckUrl': pocket_ack_url,
            'posAckUrl': pos_ack_url,
            'persistent': persistent,
        },
        cls=encoder,
    )
    self.__logger.debug(payload)
    # Encrypt the inner payload with the Registry's public key.
    sealed_payload = Crypto.encrypt(payload, public_key=self.__registry_proxy.PublicKey)
    # Perform the remote payment/register call.
    json_response = self.__registry_proxy.payment_register(
        source_id=self.ID,
        nonce=effective_nonce,
        payload=sealed_payload.decode('utf-8'),
    )
    # Decrypt the Registry's answer with our private key and parse it.
    plaintext = Crypto.decrypt(json_response['payload'], private_key=self.__pos_privk)
    return json.loads(plaintext.decode('utf-8'))
|
(self, amount, pocket_ack_url: str, filter: Optional[wom_connector.filter.Filter] = None, pos_ack_url: Optional[str] = None, persistent: bool = False, nonce=None, password=None)
|
708,656
|
wom_connector.pointofsale
|
__payment_verify
| null |
def __payment_verify(self, otc):
    # Echo the OTC back to the Registry, encrypted with its public key,
    # to confirm the payment registration.
    body = json.dumps({'otc': otc})
    sealed = Crypto.encrypt(body, public_key=self.__registry_proxy.PublicKey)
    self.__registry_proxy.payment_verify(sealed.decode('utf-8'))
|
(self, otc)
|
708,657
|
wom_connector.pointofsale
|
__init__
| null |
def __init__(self, domain: str, pos_id: str, pos_privk: bytes, pos_privk_password: str=None):
    """
    Create a WOM Point of Sale bound to the given Registry domain.

    :param domain: Domain of the Registry (e.g., wom.social).
    :param pos_id: Unique POS ID, assigned by the WOM platform.
    :param pos_privk: POS private key, PEM-encoded bytes.
    :param pos_privk_password: Optional private-key password (Default value = None).
    """
    # Proxy used for all remote Registry calls.
    self.__registry_proxy = RegistryProxy(domain)
    self.ID = pos_id
    # Parse and validate the RSA key eagerly so a bad key fails fast.
    self.__pos_privk = self.__load_private_key(pos_privk, pos_privk_password,
                                               "POS Private Key")
    self.__logger = WOMLogger("POS")
|
(self, domain: str, pos_id: str, pos_privk: bytes, pos_privk_password: Optional[str] = None)
|
708,658
|
wom_connector.pointofsale
|
request_payment
|
Obtains payment requests from WOM Registry through Registry remote API.
It returns a list containing the OTC representing the payment request and the associated password.
:param amount: int: the number of voucher needed as a payment
:param pocket_ack_url: str: the URL to be called for voucher spending notification, on pocket side
:param filter: Filter: filter to be applied on vouchers to be valid for the payment (Default value = None)
:param pos_ack_url: str: the URL to be called for voucher spending notification, on pos side (Default value = None)
:param persistent: bool: is this payment persistent? (Default value = False)
:param nonce: str: Communication nonce. If None, a proper nonce will be generated. (Default value = None)
:param password: str: Payment OTC password. If None, a secure password will be generated by the Registry. (Default value = None)
:returns: (OTC, password): (str, str)
:rtype: list(str, str)
|
def request_payment(self, amount: int,
                    pocket_ack_url: str,
                    filter: Filter = None,
                    pos_ack_url: str = None,
                    persistent: bool = False,
                    nonce=None,
                    password=None) -> (str, str):
    """
    Obtains payment requests from the WOM Registry through the Registry remote API.

    It returns the OTC representing the payment request and the associated password.

    :param amount: int: the number of vouchers needed as a payment
    :param pocket_ack_url: str: the URL to be called for voucher spending notification, on pocket side
    :param filter: Filter: filter to be applied on vouchers to be valid for the payment (Default value = None)
    :param pos_ack_url: str: the URL to be called for voucher spending notification, on pos side (Default value = None)
    :param persistent: bool: is this payment persistent? (Default value = False)
    :param nonce: str: Communication nonce. If None, a proper nonce will be generated. (Default value = None)
    :param password: str: Payment OTC password. If None, a secure password will be generated by the Registry. (Default value = None)
    :returns: (OTC, password): (str, str)
    :rtype: tuple(str, str)
    """
    # call to payment/register API
    response_data = self.__payment_register(amount, pocket_ack_url, filter, pos_ack_url, persistent, nonce, password)
    # call to payment/verify API confirms receipt of the OTC
    self.__payment_verify(response_data['otc'])
    return response_data['otc'], response_data['password']
|
(self, amount: int, pocket_ack_url: str, filter: Optional[wom_connector.filter.Filter] = None, pos_ack_url: Optional[str] = None, persistent: bool = False, nonce=None, password=None) -> (<class 'str'>, <class 'str'>)
|
708,659
|
wom_connector.voucher
|
Voucher
| null |
class Voucher:
    """Plain data holder describing a voucher generation request."""

    @staticmethod
    def create(aim: str, latitude: float, longitude: float, timestamp: datetime, count: int = 1) -> 'Voucher':
        """Build a 'Standard' voucher carrying an explicit location."""
        instance = Voucher()
        # PascalCase attribute names end up verbatim in the to_JSON() payload.
        instance.Aim = aim
        instance.Latitude = latitude
        instance.Longitude = longitude
        instance.Timestamp = timestamp.isoformat()
        instance.Count = count
        instance.CreationMode = 'Standard'
        return instance

    @staticmethod
    def createWithoutLocation(aim: str, timestamp: datetime, count: int = 1) -> 'Voucher':
        """Build a voucher whose location will be provided at redeem time."""
        instance = Voucher()
        instance.Aim = aim
        # Placeholder coordinates; 'SetLocationOnRedeem' defers the real ones.
        instance.Latitude = 0.0
        instance.Longitude = 0.0
        instance.Timestamp = timestamp.isoformat()
        instance.Count = count
        instance.CreationMode = 'SetLocationOnRedeem'
        return instance

    def to_JSON(self):
        """Expose the instance's attribute dict for JSON serialization."""
        return vars(self)
|
()
|
708,660
|
wom_connector.voucher
|
create
| null |
@staticmethod
def create(aim: str, latitude: float, longitude: float, timestamp: datetime, count: int = 1) -> 'Voucher':
    """Build a 'Standard' voucher carrying an explicit geographic location."""
    instance = Voucher()
    # PascalCase attribute names are serialized verbatim by to_JSON().
    instance.Aim = aim
    instance.Latitude = latitude
    instance.Longitude = longitude
    instance.Timestamp = timestamp.isoformat()
    instance.Count = count
    instance.CreationMode = 'Standard'
    return instance
|
(aim: str, latitude: float, longitude: float, timestamp: datetime.datetime, count: int = 1) -> wom_connector.voucher.Voucher
|
708,661
|
wom_connector.voucher
|
createWithoutLocation
| null |
@staticmethod
def createWithoutLocation(aim: str, timestamp: datetime, count: int = 1) -> 'Voucher':
    """Build a voucher whose location will be provided at redeem time."""
    instance = Voucher()
    instance.Aim = aim
    # Placeholder coordinates; 'SetLocationOnRedeem' defers the real ones.
    instance.Latitude = 0.0
    instance.Longitude = 0.0
    instance.Timestamp = timestamp.isoformat()
    instance.Count = count
    instance.CreationMode = 'SetLocationOnRedeem'
    return instance
|
(aim: str, timestamp: datetime.datetime, count: int = 1) -> wom_connector.voucher.Voucher
|
708,662
|
wom_connector.voucher
|
to_JSON
| null |
def to_JSON(self):
    """Expose the instance's attribute dict for JSON serialization."""
    return vars(self)
|
(self)
|
708,671
|
pathvalidate._base
|
AbstractSanitizer
| null |
class AbstractSanitizer(BaseFile, metaclass=abc.ABCMeta):
    """Base class for sanitizers.

    Wires together a validator and the error handlers consulted during
    sanitization; concrete subclasses implement :meth:`sanitize`.
    """
    def __init__(
        self,
        validator: AbstractValidator,
        max_len: int,
        fs_encoding: Optional[str],
        validate_after_sanitize: bool,
        null_value_handler: Optional[ValidationErrorHandler] = None,
        reserved_name_handler: Optional[ValidationErrorHandler] = None,
        additional_reserved_names: Optional[Sequence[str]] = None,
        platform_max_len: Optional[int] = None,
        platform: Optional[PlatformType] = None,
    ) -> None:
        super().__init__(
            max_len=max_len,
            fs_encoding=fs_encoding,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=platform_max_len,
            platform=platform,
        )
        # Default: a null (empty) name is sanitized to the null string.
        if null_value_handler is None:
            null_value_handler = NullValueHandler.return_null_string
        self._null_value_handler = null_value_handler
        # Default: reserved names are repaired by appending an underscore.
        if reserved_name_handler is None:
            reserved_name_handler = ReservedNameHandler.add_trailing_underscore
        self._reserved_name_handler = reserved_name_handler
        # When True, subclasses re-validate the sanitized result.
        self._validate_after_sanitize = validate_after_sanitize
        self._validator = validator
    @abc.abstractmethod
    def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:  # pragma: no cover
        """Return *value* with invalid parts replaced; defined by subclasses."""
        pass
|
(validator: pathvalidate._base.AbstractValidator, max_len: int, fs_encoding: Optional[str], validate_after_sanitize: bool, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, platform_max_len: Optional[int] = None, platform: Optional[~PlatformType] = None) -> None
|
708,672
|
pathvalidate._base
|
__init__
| null |
def __init__(
    self,
    validator: AbstractValidator,
    max_len: int,
    fs_encoding: Optional[str],
    validate_after_sanitize: bool,
    null_value_handler: Optional[ValidationErrorHandler] = None,
    reserved_name_handler: Optional[ValidationErrorHandler] = None,
    additional_reserved_names: Optional[Sequence[str]] = None,
    platform_max_len: Optional[int] = None,
    platform: Optional[PlatformType] = None,
) -> None:
    """Store the validator and sanitization error handlers.

    Handlers left as None fall back to the library defaults: null values
    become the null string, reserved names get a trailing underscore.
    """
    super().__init__(
        max_len=max_len,
        fs_encoding=fs_encoding,
        additional_reserved_names=additional_reserved_names,
        platform_max_len=platform_max_len,
        platform=platform,
    )
    # Default: a null (empty) name is sanitized to the null string.
    if null_value_handler is None:
        null_value_handler = NullValueHandler.return_null_string
    self._null_value_handler = null_value_handler
    # Default: reserved names are repaired by appending an underscore.
    if reserved_name_handler is None:
        reserved_name_handler = ReservedNameHandler.add_trailing_underscore
    self._reserved_name_handler = reserved_name_handler
    self._validate_after_sanitize = validate_after_sanitize
    self._validator = validator
|
(self, validator: pathvalidate._base.AbstractValidator, max_len: int, fs_encoding: Optional[str], validate_after_sanitize: bool, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, platform_max_len: Optional[int] = None, platform: Optional[~PlatformType] = None) -> NoneType
|
708,673
|
pathvalidate._base
|
_get_default_max_path_len
| null |
def _get_default_max_path_len(self) -> int:
    """Return the default maximum path length for the target platform."""
    # Checked in priority order; 260 doubles as the universal fallback.
    platform_limits = (
        (self._is_linux(), 4096),
        (self._is_windows(), 260),
        (self._is_posix() or self._is_macos(), 1024),
    )
    for matched, limit in platform_limits:
        if matched:
            return limit
    return 260  # universal
|
(self) -> int
|
708,674
|
pathvalidate._base
|
_is_linux
| null |
def _is_linux(self, include_universal: bool = False) -> bool:
    """Return True when the target platform is Linux (optionally counting UNIVERSAL)."""
    accepted = (Platform.UNIVERSAL, Platform.LINUX) if include_universal else (Platform.LINUX,)
    return self.platform in accepted
|
(self, include_universal: bool = False) -> bool
|
708,675
|
pathvalidate._base
|
_is_macos
| null |
def _is_macos(self, include_universal: bool = False) -> bool:
    """Return True when the target platform is macOS (optionally counting UNIVERSAL)."""
    accepted = (Platform.UNIVERSAL, Platform.MACOS) if include_universal else (Platform.MACOS,)
    return self.platform in accepted
|
(self, include_universal: bool = False) -> bool
|
708,676
|
pathvalidate._base
|
_is_posix
| null |
def _is_posix(self) -> bool:
    """Return True when the target platform is POSIX."""
    return Platform.POSIX == self.platform
|
(self) -> bool
|
708,677
|
pathvalidate._base
|
_is_universal
| null |
def _is_universal(self) -> bool:
    """Return True when the target platform is UNIVERSAL (all platforms)."""
    return Platform.UNIVERSAL == self.platform
|
(self) -> bool
|
708,678
|
pathvalidate._base
|
_is_windows
| null |
def _is_windows(self, include_universal: bool = False) -> bool:
    """Return True when the target platform is Windows (optionally counting UNIVERSAL)."""
    accepted = (Platform.UNIVERSAL, Platform.WINDOWS) if include_universal else (Platform.WINDOWS,)
    return self.platform in accepted
|
(self, include_universal: bool = False) -> bool
|
708,679
|
pathvalidate._base
|
sanitize
| null |
@abc.abstractmethod
def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:  # pragma: no cover
    """Return *value* with invalid characters replaced by *replacement_text*; implemented by subclasses."""
    pass
|
(self, value: ~PathType, replacement_text: str = '') -> ~PathType
|
708,680
|
pathvalidate._base
|
AbstractValidator
| null |
class AbstractValidator(BaseFile, metaclass=abc.ABCMeta):
    """Base class for validators; subclasses implement validate()."""
    def __init__(
        self,
        max_len: int,
        fs_encoding: Optional[str],
        check_reserved: bool,
        additional_reserved_names: Optional[Sequence[str]] = None,
        platform_max_len: Optional[int] = None,
        platform: Optional[PlatformType] = None,
    ) -> None:
        # When False, reserved-keyword checks are skipped entirely.
        self._check_reserved = check_reserved
        super().__init__(
            max_len,
            fs_encoding,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=platform_max_len,
            platform=platform,
        )
    @abc.abstractproperty
    def min_len(self) -> int:  # pragma: no cover
        """Minimum accepted length; defined by subclasses."""
        pass
    @abc.abstractmethod
    def validate(self, value: PathType) -> None:  # pragma: no cover
        """Raise an error when *value* is invalid; defined by subclasses."""
        pass
    def is_valid(self, value: PathType) -> bool:
        """Return True when validate() raises neither TypeError nor ValidationError."""
        try:
            self.validate(value)
        except (TypeError, ValidationError):
            return False
        return True
    def _is_reserved_keyword(self, value: str) -> bool:
        # Membership test against the platform-specific keyword tuple.
        return value in self.reserved_keywords
|
(max_len: int, fs_encoding: Optional[str], check_reserved: bool, additional_reserved_names: Optional[Sequence[str]] = None, platform_max_len: Optional[int] = None, platform: Optional[~PlatformType] = None) -> None
|
708,681
|
pathvalidate._base
|
__init__
| null |
def __init__(
    self,
    max_len: int,
    fs_encoding: Optional[str],
    check_reserved: bool,
    additional_reserved_names: Optional[Sequence[str]] = None,
    platform_max_len: Optional[int] = None,
    platform: Optional[PlatformType] = None,
) -> None:
    """Store the reserved-keyword flag and forward the rest to the base class."""
    # When False, reserved-keyword checks are skipped entirely.
    self._check_reserved = check_reserved
    super().__init__(
        max_len,
        fs_encoding,
        additional_reserved_names=additional_reserved_names,
        platform_max_len=platform_max_len,
        platform=platform,
    )
|
(self, max_len: int, fs_encoding: Optional[str], check_reserved: bool, additional_reserved_names: Optional[Sequence[str]] = None, platform_max_len: Optional[int] = None, platform: Optional[~PlatformType] = None) -> NoneType
|
708,686
|
pathvalidate._base
|
_is_reserved_keyword
| null |
def _is_reserved_keyword(self, value: str) -> bool:
    """Return True when *value* appears among the platform's reserved keywords."""
    reserved = self.reserved_keywords
    return value in reserved
|
(self, value: str) -> bool
|
708,689
|
pathvalidate._base
|
is_valid
| null |
def is_valid(self, value: PathType) -> bool:
    """Return True when validate() raises neither TypeError nor ValidationError."""
    try:
        self.validate(value)
        return True
    except (TypeError, ValidationError):
        return False
|
(self, value: ~PathType) -> bool
|
708,690
|
pathvalidate._base
|
validate
| null |
@abc.abstractmethod
def validate(self, value: PathType) -> None:  # pragma: no cover
    """Raise ValidationError when *value* is invalid; implemented by subclasses."""
    pass
|
(self, value: ~PathType) -> NoneType
|
708,691
|
pathvalidate.error
|
ErrorReason
|
Validation error reasons.
|
class ErrorReason(enum.Enum):
    """
    Validation error reasons.
    """
    # Each member's value is a (code, name, description) triple that
    # __init__ unpacks into private attributes.
    NULL_NAME = (_to_error_code(1001), "NULL_NAME", "the value must not be an empty")
    RESERVED_NAME = (
        _to_error_code(1002),
        "RESERVED_NAME",
        "found a reserved name by a platform",
    )
    INVALID_CHARACTER = (
        _to_error_code(1100),
        "INVALID_CHARACTER",
        "invalid characters found",
    )
    INVALID_LENGTH = (
        _to_error_code(1101),
        "INVALID_LENGTH",
        "found an invalid string length",
    )
    FOUND_ABS_PATH = (
        _to_error_code(1200),
        "FOUND_ABS_PATH",
        "found an absolute path where must be a relative path",
    )
    MALFORMED_ABS_PATH = (
        _to_error_code(1201),
        "MALFORMED_ABS_PATH",
        "found a malformed absolute path",
    )
    INVALID_AFTER_SANITIZE = (
        _to_error_code(2000),
        "INVALID_AFTER_SANITIZE",
        "found invalid value after sanitizing",
    )
    @property
    def code(self) -> str:
        """str: Error code."""
        return self.__code
    @property
    def name(self) -> str:
        """str: Error reason name."""
        # NOTE(review): this property shadows enum.Enum's built-in ``name``
        # attribute; it returns the name component stored from the value tuple.
        return self.__name
    @property
    def description(self) -> str:
        """str: Error reason description."""
        return self.__description
    def __init__(self, code: str, name: str, description: str) -> None:
        # Enum calls __init__ with the member's value tuple unpacked.
        # Double-underscore attributes are name-mangled to _ErrorReason__*.
        self.__name = name
        self.__code = code
        self.__description = description
    def __str__(self) -> str:
        # Rendered as "[<code>] <description>".
        return f"[{self.__code}] {self.__description}"
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
708,692
|
pathvalidate._filename
|
FileNameSanitizer
| null |
class FileNameSanitizer(AbstractSanitizer):
    """Sanitizer that strips or replaces characters invalid in a filename."""
    def __init__(
        self,
        max_len: int = _DEFAULT_MAX_FILENAME_LEN,
        fs_encoding: Optional[str] = None,
        platform: Optional[PlatformType] = None,
        null_value_handler: Optional[ValidationErrorHandler] = None,
        reserved_name_handler: Optional[ValidationErrorHandler] = None,
        additional_reserved_names: Optional[Sequence[str]] = None,
        validate_after_sanitize: bool = False,
        validator: Optional[AbstractValidator] = None,
    ) -> None:
        # Fall back to a default FileNameValidator when none is supplied.
        if validator:
            fname_validator = validator
        else:
            fname_validator = FileNameValidator(
                min_len=DEFAULT_MIN_LEN,
                max_len=max_len,
                fs_encoding=fs_encoding,
                check_reserved=True,
                additional_reserved_names=additional_reserved_names,
                platform=platform,
            )
        super().__init__(
            max_len=max_len,
            fs_encoding=fs_encoding,
            null_value_handler=null_value_handler,
            reserved_name_handler=reserved_name_handler,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=_DEFAULT_MAX_FILENAME_LEN,
            platform=platform,
            validate_after_sanitize=validate_after_sanitize,
            validator=fname_validator,
        )
        # Resolved once: the invalid-character pattern for the target platform.
        self._sanitize_regexp = self._get_sanitize_regexp()
    def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:
        """Return *value* with characters invalid in a filename replaced.

        Empty input is routed through the null-value handler (unless *value*
        is a PurePath, in which case the error propagates). The result is
        truncated to ``max_len`` and re-validated; reserved names, Windows
        leading/trailing whitespace rules, and null results are repaired.
        Raises ValidationError(INVALID_AFTER_SANITIZE) when
        ``validate_after_sanitize`` is set and the result is still invalid.
        PurePath input yields a Path result.
        """
        try:
            validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
        except ValidationError as e:
            if e.reason == ErrorReason.NULL_NAME:
                if isinstance(value, PurePath):
                    raise
                return self._null_value_handler(e)
            raise
        sanitized_filename = self._sanitize_regexp.sub(replacement_text, str(value))
        # Truncate before re-validation so length errors cannot occur below.
        sanitized_filename = sanitized_filename[: self.max_len]
        try:
            self._validator.validate(sanitized_filename)
        except ValidationError as e:
            if e.reason == ErrorReason.RESERVED_NAME:
                # Replace the reserved portion with the handler's substitute.
                replacement_word = self._reserved_name_handler(e)
                if e.reserved_name != replacement_word:
                    sanitized_filename = re.sub(
                        re.escape(e.reserved_name), replacement_word, sanitized_filename
                    )
            elif e.reason == ErrorReason.INVALID_CHARACTER and self._is_windows(
                include_universal=True
            ):
                # Do not start a file or directory name with a space
                sanitized_filename = sanitized_filename.lstrip(" ")
                # Do not end a file or directory name with a space or a period
                sanitized_filename = sanitized_filename.rstrip(" ")
                if sanitized_filename not in (".", ".."):
                    sanitized_filename = sanitized_filename.rstrip(" .")
            elif e.reason == ErrorReason.NULL_NAME:
                sanitized_filename = self._null_value_handler(e)
        if self._validate_after_sanitize:
            try:
                self._validator.validate(sanitized_filename)
            except ValidationError as e:
                raise ValidationError(
                    description=str(e),
                    reason=ErrorReason.INVALID_AFTER_SANITIZE,
                    platform=self.platform,
                )
        # Preserve the caller's type: PurePath in -> Path out.
        if isinstance(value, PurePath):
            return Path(sanitized_filename)
        return sanitized_filename
    def _get_sanitize_regexp(self) -> Pattern[str]:
        # Windows (and universal) forbids a larger character set.
        if self._is_windows(include_universal=True):
            return _RE_INVALID_WIN_FILENAME
        return _RE_INVALID_FILENAME
|
(max_len: int = 255, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, validate_after_sanitize: bool = False, validator: Optional[pathvalidate._base.AbstractValidator] = None) -> None
|
708,693
|
pathvalidate._filename
|
__init__
| null |
def __init__(
    self,
    max_len: int = _DEFAULT_MAX_FILENAME_LEN,
    fs_encoding: Optional[str] = None,
    platform: Optional[PlatformType] = None,
    null_value_handler: Optional[ValidationErrorHandler] = None,
    reserved_name_handler: Optional[ValidationErrorHandler] = None,
    additional_reserved_names: Optional[Sequence[str]] = None,
    validate_after_sanitize: bool = False,
    validator: Optional[AbstractValidator] = None,
) -> None:
    """Set up the filename sanitizer and its backing validator."""
    # Fall back to a default FileNameValidator when none is supplied.
    if validator:
        fname_validator = validator
    else:
        fname_validator = FileNameValidator(
            min_len=DEFAULT_MIN_LEN,
            max_len=max_len,
            fs_encoding=fs_encoding,
            check_reserved=True,
            additional_reserved_names=additional_reserved_names,
            platform=platform,
        )
    super().__init__(
        max_len=max_len,
        fs_encoding=fs_encoding,
        null_value_handler=null_value_handler,
        reserved_name_handler=reserved_name_handler,
        additional_reserved_names=additional_reserved_names,
        platform_max_len=_DEFAULT_MAX_FILENAME_LEN,
        platform=platform,
        validate_after_sanitize=validate_after_sanitize,
        validator=fname_validator,
    )
    # Resolved once: the invalid-character pattern for the target platform.
    self._sanitize_regexp = self._get_sanitize_regexp()
|
(self, max_len: int = 255, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, validate_after_sanitize: bool = False, validator: Optional[pathvalidate._base.AbstractValidator] = None) -> NoneType
|
708,695
|
pathvalidate._filename
|
_get_sanitize_regexp
| null |
def _get_sanitize_regexp(self) -> Pattern[str]:
    """Pick the invalid-filename-character pattern for the target platform."""
    windows_like = self._is_windows(include_universal=True)
    return _RE_INVALID_WIN_FILENAME if windows_like else _RE_INVALID_FILENAME
|
(self) -> Pattern[str]
|
708,701
|
pathvalidate._filename
|
sanitize
| null |
def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:
    """Return *value* with characters invalid in a filename replaced.

    Empty input is routed through the null-value handler (unless *value*
    is a PurePath, in which case the error propagates). The result is
    truncated to ``max_len`` and re-validated; reserved names, Windows
    leading/trailing whitespace rules, and null results are repaired.
    Raises ValidationError(INVALID_AFTER_SANITIZE) when
    ``validate_after_sanitize`` is set and the result is still invalid.
    PurePath input yields a Path result.
    """
    try:
        validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
    except ValidationError as e:
        if e.reason == ErrorReason.NULL_NAME:
            if isinstance(value, PurePath):
                raise
            return self._null_value_handler(e)
        raise
    sanitized_filename = self._sanitize_regexp.sub(replacement_text, str(value))
    # Truncate before re-validation so length errors cannot occur below.
    sanitized_filename = sanitized_filename[: self.max_len]
    try:
        self._validator.validate(sanitized_filename)
    except ValidationError as e:
        if e.reason == ErrorReason.RESERVED_NAME:
            # Replace the reserved portion with the handler's substitute.
            replacement_word = self._reserved_name_handler(e)
            if e.reserved_name != replacement_word:
                sanitized_filename = re.sub(
                    re.escape(e.reserved_name), replacement_word, sanitized_filename
                )
        elif e.reason == ErrorReason.INVALID_CHARACTER and self._is_windows(
            include_universal=True
        ):
            # Do not start a file or directory name with a space
            sanitized_filename = sanitized_filename.lstrip(" ")
            # Do not end a file or directory name with a space or a period
            sanitized_filename = sanitized_filename.rstrip(" ")
            if sanitized_filename not in (".", ".."):
                sanitized_filename = sanitized_filename.rstrip(" .")
        elif e.reason == ErrorReason.NULL_NAME:
            sanitized_filename = self._null_value_handler(e)
    if self._validate_after_sanitize:
        try:
            self._validator.validate(sanitized_filename)
        except ValidationError as e:
            raise ValidationError(
                description=str(e),
                reason=ErrorReason.INVALID_AFTER_SANITIZE,
                platform=self.platform,
            )
    # Preserve the caller's type: PurePath in -> Path out.
    if isinstance(value, PurePath):
        return Path(sanitized_filename)
    return sanitized_filename
|
(self, value: ~PathType, replacement_text: str = '') -> ~PathType
|
708,702
|
pathvalidate._filename
|
FileNameValidator
| null |
class FileNameValidator(BaseValidator):
    """Validator enforcing filename rules: characters, byte length, reserved names."""
    # Windows reserves CON/PRN/AUX/CLOCK$/NUL plus COM1-COM9 and LPT1-LPT9.
    _WINDOWS_RESERVED_FILE_NAMES = ("CON", "PRN", "AUX", "CLOCK$", "NUL") + tuple(
        f"{name:s}{num:d}" for name, num in itertools.product(("COM", "LPT"), range(1, 10))
    )
    _MACOS_RESERVED_FILE_NAMES = (":",)
    @property
    def reserved_keywords(self) -> Tuple[str, ...]:
        """Tuple[str, ...]: Reserved names for the target platform, sorted."""
        common_keywords = super().reserved_keywords
        if self._is_universal():
            # Universal target: union of every platform's reserved names.
            word_set = set(
                common_keywords
                + self._WINDOWS_RESERVED_FILE_NAMES
                + self._MACOS_RESERVED_FILE_NAMES
            )
        elif self._is_windows():
            word_set = set(common_keywords + self._WINDOWS_RESERVED_FILE_NAMES)
        elif self._is_posix() or self._is_macos():
            word_set = set(common_keywords + self._MACOS_RESERVED_FILE_NAMES)
        else:
            word_set = set(common_keywords)
        return tuple(sorted(word_set))
    def __init__(
        self,
        min_len: int = DEFAULT_MIN_LEN,
        max_len: int = _DEFAULT_MAX_FILENAME_LEN,
        fs_encoding: Optional[str] = None,
        platform: Optional[PlatformType] = None,
        check_reserved: bool = True,
        additional_reserved_names: Optional[Sequence[str]] = None,
    ) -> None:
        """Forward configuration to the base validator with filename defaults."""
        super().__init__(
            min_len=min_len,
            max_len=max_len,
            fs_encoding=fs_encoding,
            check_reserved=check_reserved,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=_DEFAULT_MAX_FILENAME_LEN,
            platform=platform,
        )
    def validate(self, value: PathType) -> None:
        """Raise ValidationError when *value* is not a valid filename.

        Checks, in order: path type, absolute-path rejection, encoded byte
        length against min/max, reserved keywords, universally invalid
        characters, and (Windows/universal targets) Windows-specific rules.
        """
        validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
        unicode_filename = to_str(value)
        # Length limits are applied to the encoded byte count, not characters.
        byte_ct = len(unicode_filename.encode(self._fs_encoding))
        self.validate_abspath(unicode_filename)
        err_kwargs = {
            ErrorAttrKey.REASON: ErrorReason.INVALID_LENGTH,
            ErrorAttrKey.PLATFORM: self.platform,
            ErrorAttrKey.FS_ENCODING: self._fs_encoding,
            ErrorAttrKey.BYTE_COUNT: byte_ct,
        }
        if byte_ct > self.max_len:
            raise ValidationError(
                [
                    f"filename is too long: expected<={self.max_len:d} bytes, actual={byte_ct:d} bytes"
                ],
                **err_kwargs,
            )
        if byte_ct < self.min_len:
            raise ValidationError(
                [
                    f"filename is too short: expected>={self.min_len:d} bytes, actual={byte_ct:d} bytes"
                ],
                **err_kwargs,
            )
        self._validate_reserved_keywords(unicode_filename)
        self.__validate_universal_filename(unicode_filename)
        if self._is_windows(include_universal=True):
            self.__validate_win_filename(unicode_filename)
    def validate_abspath(self, value: str) -> None:
        """Raise ValidationError(FOUND_ABS_PATH) when *value* is an absolute path."""
        err = ValidationError(
            description=f"found an absolute path ({value}), expected a filename",
            platform=self.platform,
            reason=ErrorReason.FOUND_ABS_PATH,
        )
        if self._is_windows(include_universal=True):
            if ntpath.isabs(value):
                raise err
        # POSIX-style absolute paths are rejected on every platform.
        if posixpath.isabs(value):
            raise err
    def __validate_universal_filename(self, unicode_filename: str) -> None:
        # Characters invalid on every platform.
        match = _RE_INVALID_FILENAME.findall(unicode_filename)
        if match:
            raise InvalidCharError(
                INVALID_CHAR_ERR_MSG_TMPL.format(
                    invalid=findall_to_str(match), value=repr(unicode_filename)
                ),
                platform=Platform.UNIVERSAL,
            )
    def __validate_win_filename(self, unicode_filename: str) -> None:
        # Windows-specific invalid characters.
        match = _RE_INVALID_WIN_FILENAME.findall(unicode_filename)
        if match:
            raise InvalidCharError(
                INVALID_CHAR_ERR_MSG_TMPL.format(
                    invalid=findall_to_str(match), value=repr(unicode_filename)
                ),
                platform=Platform.WINDOWS,
            )
        # "." and ".." are directory references, not filenames to validate.
        if unicode_filename in (".", ".."):
            return
        KB2829981_err_tmpl = "{}. Refer: https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/file-folder-name-whitespace-characters"  # noqa: E501
        if unicode_filename[-1] in (" ", "."):
            raise InvalidCharError(
                INVALID_CHAR_ERR_MSG_TMPL.format(
                    invalid=re.escape(unicode_filename[-1]), value=repr(unicode_filename)
                ),
                platform=Platform.WINDOWS,
                description=KB2829981_err_tmpl.format(
                    "Do not end a file or directory name with a space or a period"
                ),
            )
        # NOTE(review): (" ") is a plain string, not a 1-tuple; the membership
        # test works only because unicode_filename[0] is a single character.
        if unicode_filename[0] in (" "):
            raise InvalidCharError(
                INVALID_CHAR_ERR_MSG_TMPL.format(
                    invalid=re.escape(unicode_filename[0]), value=repr(unicode_filename)
                ),
                platform=Platform.WINDOWS,
                description=KB2829981_err_tmpl.format(
                    "Do not start a file or directory name with a space"
                ),
            )
|
(min_len: int = 1, max_len: int = 255, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> None
|
708,703
|
pathvalidate._base
|
__extract_root_name
| null |
@staticmethod
def __extract_root_name(path: str) -> str:
    """Return the basename of *path* with its extension stripped."""
    basename = os.path.basename(path)
    root, _ext = os.path.splitext(basename)
    return root
|
(path: str) -> str
|
708,704
|
pathvalidate._filename
|
__validate_universal_filename
| null |
def __validate_universal_filename(self, unicode_filename: str) -> None:
    """Raise InvalidCharError when characters invalid on all platforms are present."""
    invalid_chars = _RE_INVALID_FILENAME.findall(unicode_filename)
    if not invalid_chars:
        return
    raise InvalidCharError(
        INVALID_CHAR_ERR_MSG_TMPL.format(
            invalid=findall_to_str(invalid_chars), value=repr(unicode_filename)
        ),
        platform=Platform.UNIVERSAL,
    )
|
(self, unicode_filename: str) -> NoneType
|
708,705
|
pathvalidate._filename
|
__validate_win_filename
| null |
def __validate_win_filename(self, unicode_filename: str) -> None:
    """Validate *unicode_filename* against Windows-specific filename rules.

    Raises InvalidCharError for characters Windows forbids, and — per
    Microsoft KB2829981 — for a leading space or a trailing space/period,
    except for the special directory references "." and "..".
    """
    match = _RE_INVALID_WIN_FILENAME.findall(unicode_filename)
    if match:
        raise InvalidCharError(
            INVALID_CHAR_ERR_MSG_TMPL.format(
                invalid=findall_to_str(match), value=repr(unicode_filename)
            ),
            platform=Platform.WINDOWS,
        )
    if unicode_filename in (".", ".."):
        return
    KB2829981_err_tmpl = "{}. Refer: https://learn.microsoft.com/en-us/troubleshoot/windows-client/shell-experience/file-folder-name-whitespace-characters"  # noqa: E501
    if unicode_filename[-1] in (" ", "."):
        raise InvalidCharError(
            INVALID_CHAR_ERR_MSG_TMPL.format(
                invalid=re.escape(unicode_filename[-1]), value=repr(unicode_filename)
            ),
            platform=Platform.WINDOWS,
            description=KB2829981_err_tmpl.format(
                "Do not end a file or directory name with a space or a period"
            ),
        )
    # FIX: the original wrote ``in (" ")`` — a plain string, not a tuple.
    # It behaved correctly only because the tested value is a single
    # character; use a real tuple so the check stays correct if more
    # characters are ever added.
    if unicode_filename[0] in (" ",):
        raise InvalidCharError(
            INVALID_CHAR_ERR_MSG_TMPL.format(
                invalid=re.escape(unicode_filename[0]), value=repr(unicode_filename)
            ),
            platform=Platform.WINDOWS,
            description=KB2829981_err_tmpl.format(
                "Do not start a file or directory name with a space"
            ),
        )
|
(self, unicode_filename: str) -> NoneType
|
708,706
|
pathvalidate._filename
|
__init__
| null |
def __init__(
    self,
    min_len: int = DEFAULT_MIN_LEN,
    max_len: int = _DEFAULT_MAX_FILENAME_LEN,
    fs_encoding: Optional[str] = None,
    platform: Optional[PlatformType] = None,
    check_reserved: bool = True,
    additional_reserved_names: Optional[Sequence[str]] = None,
) -> None:
    """Forward configuration to the base validator with filename defaults."""
    super().__init__(
        min_len=min_len,
        max_len=max_len,
        fs_encoding=fs_encoding,
        check_reserved=check_reserved,
        additional_reserved_names=additional_reserved_names,
        platform_max_len=_DEFAULT_MAX_FILENAME_LEN,
        platform=platform,
    )
|
(self, min_len: int = 1, max_len: int = 255, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> NoneType
|
708,714
|
pathvalidate._base
|
_validate_max_len
| null |
def _validate_max_len(self) -> None:
    """Validate the configured length bounds.

    Raises:
        ValueError: if ``max_len`` is below one, or ``min_len`` strictly
            exceeds ``max_len``.
    """
    if self.max_len < 1:
        raise ValueError("max_len must be greater or equal to one")
    if self.min_len > self.max_len:
        # FIX: the guard triggers only when min_len strictly exceeds
        # max_len (equality is allowed), so the message must say
        # "lower than or equal", not "lower than".
        raise ValueError("min_len must be lower than or equal to max_len")
|
(self) -> NoneType
|
708,715
|
pathvalidate._base
|
_validate_reserved_keywords
| null |
def _validate_reserved_keywords(self, name: str) -> None:
    """Raise ReservedNameError when *name* collides with a reserved keyword.

    Both the extension-less root name and the full basename are checked,
    case-insensitively. No-op when reserved checking is disabled.
    """
    if not self._check_reserved:
        return
    root_name = self.__extract_root_name(name)
    # base_name is upper-cased here once; the original redundantly called
    # .upper() on it a second time inside the condition below.
    base_name = os.path.basename(name).upper()
    if self._is_reserved_keyword(root_name.upper()) or self._is_reserved_keyword(base_name):
        raise ReservedNameError(
            f"'{root_name}' is a reserved name",
            reusable_name=False,
            reserved_name=root_name,
            platform=self.platform,
        )
|
(self, name: str) -> NoneType
|
708,717
|
pathvalidate._filename
|
validate
| null |
def validate(self, value: PathType) -> None:
    """Raise ValidationError when *value* is not a valid filename.

    Checks, in order: path type, absolute-path rejection, encoded byte
    length against min/max, reserved keywords, universally invalid
    characters, and (Windows/universal targets) Windows-specific rules.
    """
    validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
    unicode_filename = to_str(value)
    # Length limits are applied to the encoded byte count, not characters.
    byte_ct = len(unicode_filename.encode(self._fs_encoding))
    self.validate_abspath(unicode_filename)
    err_kwargs = {
        ErrorAttrKey.REASON: ErrorReason.INVALID_LENGTH,
        ErrorAttrKey.PLATFORM: self.platform,
        ErrorAttrKey.FS_ENCODING: self._fs_encoding,
        ErrorAttrKey.BYTE_COUNT: byte_ct,
    }
    if byte_ct > self.max_len:
        raise ValidationError(
            [
                f"filename is too long: expected<={self.max_len:d} bytes, actual={byte_ct:d} bytes"
            ],
            **err_kwargs,
        )
    if byte_ct < self.min_len:
        raise ValidationError(
            [
                f"filename is too short: expected>={self.min_len:d} bytes, actual={byte_ct:d} bytes"
            ],
            **err_kwargs,
        )
    self._validate_reserved_keywords(unicode_filename)
    self.__validate_universal_filename(unicode_filename)
    if self._is_windows(include_universal=True):
        self.__validate_win_filename(unicode_filename)
|
(self, value: ~PathType) -> NoneType
|
708,718
|
pathvalidate._filename
|
validate_abspath
| null |
def validate_abspath(self, value: str) -> None:
    """Raise ValidationError(FOUND_ABS_PATH) when *value* is an absolute path."""
    # POSIX-style absolute paths are rejected on every platform; NT-style
    # ones only matter on Windows-like targets.
    is_absolute = posixpath.isabs(value)
    if not is_absolute and self._is_windows(include_universal=True):
        is_absolute = ntpath.isabs(value)
    if is_absolute:
        raise ValidationError(
            description=f"found an absolute path ({value}), expected a filename",
            platform=self.platform,
            reason=ErrorReason.FOUND_ABS_PATH,
        )
|
(self, value: str) -> NoneType
|
708,719
|
pathvalidate._filepath
|
FilePathSanitizer
| null |
class FilePathSanitizer(AbstractSanitizer):
    """Sanitizer that cleans a file path by sanitizing each of its components."""
    def __init__(
        self,
        max_len: int = -1,
        fs_encoding: Optional[str] = None,
        platform: Optional[PlatformType] = None,
        null_value_handler: Optional[ValidationErrorHandler] = None,
        reserved_name_handler: Optional[ValidationErrorHandler] = None,
        additional_reserved_names: Optional[Sequence[str]] = None,
        normalize: bool = True,
        validate_after_sanitize: bool = False,
        validator: Optional[AbstractValidator] = None,
    ) -> None:
        # Fall back to a default FilePathValidator when none is supplied.
        if validator:
            fpath_validator = validator
        else:
            fpath_validator = FilePathValidator(
                min_len=DEFAULT_MIN_LEN,
                max_len=max_len,
                fs_encoding=fs_encoding,
                check_reserved=True,
                additional_reserved_names=additional_reserved_names,
                platform=platform,
            )
        super().__init__(
            max_len=max_len,
            fs_encoding=fs_encoding,
            validator=fpath_validator,
            null_value_handler=null_value_handler,
            reserved_name_handler=reserved_name_handler,
            additional_reserved_names=additional_reserved_names,
            platform=platform,
            validate_after_sanitize=validate_after_sanitize,
        )
        self._sanitize_regexp = self._get_sanitize_regexp()
        # Per-component sanitizing is delegated to a FileNameSanitizer.
        self.__fname_sanitizer = FileNameSanitizer(
            max_len=self.max_len,
            fs_encoding=fs_encoding,
            null_value_handler=null_value_handler,
            reserved_name_handler=reserved_name_handler,
            additional_reserved_names=additional_reserved_names,
            platform=self.platform,
            validate_after_sanitize=validate_after_sanitize,
        )
        self.__normalize = normalize
        # Drive letters are only meaningful on Windows-like targets.
        if self._is_windows(include_universal=True):
            self.__split_drive = ntpath.splitdrive
        else:
            self.__split_drive = posixpath.splitdrive
    def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:
        """Return *value* with every path component sanitized.

        Splits off the drive (Windows-like targets), replaces invalid
        characters, optionally normalizes the path, sanitizes each
        component via the filename sanitizer (NTFS reserved entries are
        suffixed with an underscore), rejoins with the platform separator,
        and re-validates. Empty input is routed through the null-value
        handler; PurePath input yields a Path result.
        """
        try:
            validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
        except ValidationError as e:
            if e.reason == ErrorReason.NULL_NAME:
                if isinstance(value, PurePath):
                    raise
                return self._null_value_handler(e)
            raise
        unicode_filepath = to_str(value)
        drive, unicode_filepath = self.__split_drive(unicode_filepath)
        unicode_filepath = self._sanitize_regexp.sub(replacement_text, unicode_filepath)
        if self.__normalize and unicode_filepath:
            unicode_filepath = os.path.normpath(unicode_filepath)
        sanitized_path = unicode_filepath
        sanitized_entries: List[str] = []
        if drive:
            sanitized_entries.append(drive)
        for entry in sanitized_path.replace("\\", "/").split("/"):
            if entry in _NTFS_RESERVED_FILE_NAMES:
                # NTFS reserved entries cannot be repaired per-character;
                # rename by appending an underscore instead.
                sanitized_entries.append(f"{entry}_")
                continue
            sanitized_entry = str(
                self.__fname_sanitizer.sanitize(entry, replacement_text=replacement_text)
            )
            if not sanitized_entry:
                # Keep a single empty head entry so a leading separator
                # (absolute path) survives the rejoin below.
                if not sanitized_entries:
                    sanitized_entries.append("")
                continue
            sanitized_entries.append(sanitized_entry)
        sanitized_path = self.__get_path_separator().join(sanitized_entries)
        try:
            self._validator.validate(sanitized_path)
        except ValidationError as e:
            if e.reason == ErrorReason.NULL_NAME:
                sanitized_path = self._null_value_handler(e)
        if self._validate_after_sanitize:
            self._validator.validate(sanitized_path)
        # Preserve the caller's type: PurePath in -> Path out.
        if isinstance(value, PurePath):
            return Path(sanitized_path)
        return sanitized_path
    def _get_sanitize_regexp(self) -> Pattern[str]:
        # Windows (and universal) forbids a larger character set.
        if self._is_windows(include_universal=True):
            return _RE_INVALID_WIN_PATH
        return _RE_INVALID_PATH
    def __get_path_separator(self) -> str:
        # Backslash only for strictly-Windows targets (not universal).
        if self._is_windows():
            return "\\"
        return "/"
|
(max_len: int = -1, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, normalize: bool = True, validate_after_sanitize: bool = False, validator: Optional[pathvalidate._base.AbstractValidator] = None) -> None
|
708,720
|
pathvalidate._filepath
|
__get_path_separator
| null |
def __get_path_separator(self) -> str:
    """Return the path separator: backslash for strictly-Windows targets, else slash."""
    return "\\" if self._is_windows() else "/"
|
(self) -> str
|
708,721
|
pathvalidate._filepath
|
__init__
| null |
def __init__(
    self,
    max_len: int = -1,
    fs_encoding: Optional[str] = None,
    platform: Optional[PlatformType] = None,
    null_value_handler: Optional[ValidationErrorHandler] = None,
    reserved_name_handler: Optional[ValidationErrorHandler] = None,
    additional_reserved_names: Optional[Sequence[str]] = None,
    normalize: bool = True,
    validate_after_sanitize: bool = False,
    validator: Optional[AbstractValidator] = None,
) -> None:
    """Set up the file-path sanitizer, its validator, and its per-component helper."""
    # Fall back to a default FilePathValidator when none is supplied.
    if validator:
        fpath_validator = validator
    else:
        fpath_validator = FilePathValidator(
            min_len=DEFAULT_MIN_LEN,
            max_len=max_len,
            fs_encoding=fs_encoding,
            check_reserved=True,
            additional_reserved_names=additional_reserved_names,
            platform=platform,
        )
    super().__init__(
        max_len=max_len,
        fs_encoding=fs_encoding,
        validator=fpath_validator,
        null_value_handler=null_value_handler,
        reserved_name_handler=reserved_name_handler,
        additional_reserved_names=additional_reserved_names,
        platform=platform,
        validate_after_sanitize=validate_after_sanitize,
    )
    self._sanitize_regexp = self._get_sanitize_regexp()
    # Per-component sanitizing is delegated to a FileNameSanitizer.
    self.__fname_sanitizer = FileNameSanitizer(
        max_len=self.max_len,
        fs_encoding=fs_encoding,
        null_value_handler=null_value_handler,
        reserved_name_handler=reserved_name_handler,
        additional_reserved_names=additional_reserved_names,
        platform=self.platform,
        validate_after_sanitize=validate_after_sanitize,
    )
    self.__normalize = normalize
    # Drive letters are only meaningful on Windows-like targets.
    if self._is_windows(include_universal=True):
        self.__split_drive = ntpath.splitdrive
    else:
        self.__split_drive = posixpath.splitdrive
|
(self, max_len: int = -1, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, normalize: bool = True, validate_after_sanitize: bool = False, validator: Optional[pathvalidate._base.AbstractValidator] = None) -> NoneType
|
708,723
|
pathvalidate._filepath
|
_get_sanitize_regexp
| null |
def _get_sanitize_regexp(self) -> Pattern[str]:
    """Return the compiled pattern of invalid-path characters for the target platform."""
    windows_like = self._is_windows(include_universal=True)
    return _RE_INVALID_WIN_PATH if windows_like else _RE_INVALID_PATH
|
(self) -> Pattern[str]
|
708,729
|
pathvalidate._filepath
|
sanitize
| null |
def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:
    """Return *value* with invalid path characters replaced by *replacement_text*.

    The drive part (if any) is preserved, the path is optionally normalized,
    and every component is sanitized as a file name. ``str`` input yields a
    ``str``; ``PurePath`` input yields a ``Path``.
    """
    try:
        validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
    except ValidationError as e:
        if e.reason == ErrorReason.NULL_NAME:
            if isinstance(value, PurePath):
                # An empty PurePath cannot be repaired; propagate the error.
                raise
            return self._null_value_handler(e)
        raise

    unicode_filepath = to_str(value)

    drive, unicode_filepath = self.__split_drive(unicode_filepath)
    # Strip platform-invalid characters before any per-component handling.
    unicode_filepath = self._sanitize_regexp.sub(replacement_text, unicode_filepath)
    if self.__normalize and unicode_filepath:
        unicode_filepath = os.path.normpath(unicode_filepath)
    sanitized_path = unicode_filepath

    sanitized_entries: List[str] = []
    if drive:
        sanitized_entries.append(drive)
    for entry in sanitized_path.replace("\\", "/").split("/"):
        if entry in _NTFS_RESERVED_FILE_NAMES:
            # NTFS reserved names get a trailing underscore instead of the
            # regular file-name sanitization.
            sanitized_entries.append(f"{entry}_")
            continue

        sanitized_entry = str(
            self.__fname_sanitizer.sanitize(entry, replacement_text=replacement_text)
        )
        if not sanitized_entry:
            if not sanitized_entries:
                # Preserve a leading empty component so the joined result
                # keeps its leading separator.
                sanitized_entries.append("")
            continue

        sanitized_entries.append(sanitized_entry)

    sanitized_path = self.__get_path_separator().join(sanitized_entries)
    try:
        self._validator.validate(sanitized_path)
    except ValidationError as e:
        if e.reason == ErrorReason.NULL_NAME:
            sanitized_path = self._null_value_handler(e)

    if self._validate_after_sanitize:
        self._validator.validate(sanitized_path)

    if isinstance(value, PurePath):
        return Path(sanitized_path)

    return sanitized_path
|
(self, value: ~PathType, replacement_text: str = '') -> ~PathType
|
708,730
|
pathvalidate._filepath
|
FilePathValidator
| null |
class FilePathValidator(BaseValidator):
    """Validator for file paths.

    Checks absolute-path form, byte length, invalid characters, and reserved
    keywords for the configured target platform; each path component is also
    checked against reserved file names.
    """

    # Matches a one-component tail (e.g. "/$Mft") that is exactly an NTFS
    # reserved file name, case-insensitively.
    _RE_NTFS_RESERVED = re.compile(
        "|".join(f"^/{re.escape(pattern)}$" for pattern in _NTFS_RESERVED_FILE_NAMES),
        re.IGNORECASE,
    )
    # Path strings reserved on macOS (also applied to POSIX/universal).
    _MACOS_RESERVED_FILE_PATHS = ("/", ":")

    @property
    def reserved_keywords(self) -> Tuple[str, ...]:
        """Reserved keywords for the target platform, extended with path-level entries."""
        common_keywords = super().reserved_keywords

        if any([self._is_universal(), self._is_posix(), self._is_macos()]):
            return common_keywords + self._MACOS_RESERVED_FILE_PATHS
        if self._is_linux():
            return common_keywords + ("/",)

        return common_keywords

    def __init__(
        self,
        min_len: int = DEFAULT_MIN_LEN,
        max_len: int = -1,
        fs_encoding: Optional[str] = None,
        platform: Optional[PlatformType] = None,
        check_reserved: bool = True,
        additional_reserved_names: Optional[Sequence[str]] = None,
    ) -> None:
        super().__init__(
            min_len=min_len,
            max_len=max_len,
            fs_encoding=fs_encoding,
            check_reserved=check_reserved,
            additional_reserved_names=additional_reserved_names,
            platform=platform,
        )

        # Each path component is validated as a file name in its own right.
        self.__fname_validator = FileNameValidator(
            min_len=min_len,
            max_len=max_len,
            check_reserved=check_reserved,
            additional_reserved_names=additional_reserved_names,
            platform=platform,
        )

        # Only Windows-style paths can carry a drive letter.
        if self._is_windows(include_universal=True):
            self.__split_drive = ntpath.splitdrive
        else:
            self.__split_drive = posixpath.splitdrive

    def validate(self, value: PathType) -> None:
        """Validate *value* as a file path; raise ValidationError on any failure."""
        validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
        self.validate_abspath(value)

        _drive, tail = self.__split_drive(value)
        if not tail:
            # A bare drive (e.g. "C:") has nothing further to validate.
            return

        unicode_filepath = to_str(tail)

        # Length limits are measured in bytes of the filesystem encoding.
        byte_ct = len(unicode_filepath.encode(self._fs_encoding))
        err_kwargs = {
            ErrorAttrKey.REASON: ErrorReason.INVALID_LENGTH,
            ErrorAttrKey.PLATFORM: self.platform,
            ErrorAttrKey.FS_ENCODING: self._fs_encoding,
            ErrorAttrKey.BYTE_COUNT: byte_ct,
        }
        if byte_ct > self.max_len:
            raise ValidationError(
                [
                    f"file path is too long: expected<={self.max_len:d} bytes, actual={byte_ct:d} bytes"
                ],
                **err_kwargs,
            )
        if byte_ct < self.min_len:
            raise ValidationError(
                [
                    "file path is too short: expected>={:d} bytes, actual={:d} bytes".format(
                        self.min_len, byte_ct
                    )
                ],
                **err_kwargs,
            )

        self._validate_reserved_keywords(unicode_filepath)

        # Use "/" uniformly so components can be split on one separator.
        unicode_filepath = unicode_filepath.replace("\\", "/")
        for entry in unicode_filepath.split("/"):
            if not entry or entry in (".", ".."):
                continue
            # Components must not be reserved file names.
            self.__fname_validator._validate_reserved_keywords(entry)

        if self._is_windows(include_universal=True):
            self.__validate_win_filepath(unicode_filepath)
        else:
            self.__validate_unix_filepath(unicode_filepath)

    def validate_abspath(self, value: PathType) -> None:
        """Reject absolute paths whose style does not match the target platform."""
        is_posix_abs = posixpath.isabs(value)
        is_nt_abs = ntpath.isabs(value)
        err_object = ValidationError(
            description=(
                "an invalid absolute file path ({}) for the platform ({}).".format(
                    value, self.platform.value
                )
                + " to avoid the error, specify an appropriate platform corresponding to"
                + " the path format or 'auto'."
            ),
            platform=self.platform,
            reason=ErrorReason.MALFORMED_ABS_PATH,
        )

        if any([self._is_windows() and is_nt_abs, self._is_linux() and is_posix_abs]):
            # Absolute-path style matches the platform: valid.
            return

        if self._is_universal() and any([is_posix_abs, is_nt_abs]):
            # BUG FIX: this error was constructed but never raised, leaving
            # the specific "platform-independent" message as dead code.
            raise ValidationError(
                description=(
                    ("POSIX style" if is_posix_abs else "NT style")
                    + " absolute file path found. expected a platform-independent file path."
                ),
                platform=self.platform,
                reason=ErrorReason.MALFORMED_ABS_PATH,
            )

        if self._is_windows(include_universal=True) and is_posix_abs:
            raise err_object

        drive, _tail = ntpath.splitdrive(value)
        if not self._is_windows() and drive and is_nt_abs:
            raise err_object

    def __validate_unix_filepath(self, unicode_filepath: str) -> None:
        """Raise InvalidCharError if the path contains characters invalid on POSIX."""
        match = _RE_INVALID_PATH.findall(unicode_filepath)
        if match:
            raise InvalidCharError(
                INVALID_CHAR_ERR_MSG_TMPL.format(
                    invalid=findall_to_str(match), value=repr(unicode_filepath)
                )
            )

    def __validate_win_filepath(self, unicode_filepath: str) -> None:
        """Raise on invalid Windows characters or an NTFS reserved-name tail."""
        match = _RE_INVALID_WIN_PATH.findall(unicode_filepath)
        if match:
            raise InvalidCharError(
                INVALID_CHAR_ERR_MSG_TMPL.format(
                    invalid=findall_to_str(match), value=repr(unicode_filepath)
                ),
                platform=Platform.WINDOWS,
            )

        _drive, value = self.__split_drive(unicode_filepath)
        if value:
            match_reserved = self._RE_NTFS_RESERVED.search(value)
            if match_reserved:
                reserved_name = match_reserved.group()
                raise ReservedNameError(
                    f"'{reserved_name}' is a reserved name",
                    reusable_name=False,
                    reserved_name=reserved_name,
                    platform=self.platform,
                )
|
(min_len: int = 1, max_len: int = -1, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> None
|
708,732
|
pathvalidate._filepath
|
__validate_unix_filepath
| null |
def __validate_unix_filepath(self, unicode_filepath: str) -> None:
    """Raise InvalidCharError if *unicode_filepath* has characters invalid on POSIX."""
    found = _RE_INVALID_PATH.findall(unicode_filepath)
    if not found:
        return
    raise InvalidCharError(
        INVALID_CHAR_ERR_MSG_TMPL.format(
            invalid=findall_to_str(found), value=repr(unicode_filepath)
        )
    )
|
(self, unicode_filepath: str) -> NoneType
|
708,733
|
pathvalidate._filepath
|
__validate_win_filepath
| null |
def __validate_win_filepath(self, unicode_filepath: str) -> None:
    """Raise on invalid Windows characters or an NTFS reserved-name tail."""
    found = _RE_INVALID_WIN_PATH.findall(unicode_filepath)
    if found:
        raise InvalidCharError(
            INVALID_CHAR_ERR_MSG_TMPL.format(
                invalid=findall_to_str(found), value=repr(unicode_filepath)
            ),
            platform=Platform.WINDOWS,
        )

    _drive, tail = self.__split_drive(unicode_filepath)
    if not tail:
        return

    reserved_match = self._RE_NTFS_RESERVED.search(tail)
    if reserved_match is None:
        return

    reserved_name = reserved_match.group()
    raise ReservedNameError(
        f"'{reserved_name}' is a reserved name",
        reusable_name=False,
        reserved_name=reserved_name,
        platform=self.platform,
    )
|
(self, unicode_filepath: str) -> NoneType
|
708,734
|
pathvalidate._filepath
|
__init__
| null |
def __init__(
    self,
    min_len: int = DEFAULT_MIN_LEN,
    max_len: int = -1,
    fs_encoding: Optional[str] = None,
    platform: Optional[PlatformType] = None,
    check_reserved: bool = True,
    additional_reserved_names: Optional[Sequence[str]] = None,
) -> None:
    """Initialize the path validator and its per-component file-name validator."""
    super().__init__(
        min_len=min_len,
        max_len=max_len,
        fs_encoding=fs_encoding,
        check_reserved=check_reserved,
        additional_reserved_names=additional_reserved_names,
        platform=platform,
    )

    # Every path component is additionally validated as a file name.
    self.__fname_validator = FileNameValidator(
        min_len=min_len,
        max_len=max_len,
        check_reserved=check_reserved,
        additional_reserved_names=additional_reserved_names,
        platform=platform,
    )

    # Only Windows-style paths can carry a drive letter.
    self.__split_drive = (
        ntpath.splitdrive if self._is_windows(include_universal=True) else posixpath.splitdrive
    )
|
(self, min_len: int = 1, max_len: int = -1, fs_encoding: Optional[str] = None, platform: Optional[~PlatformType] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> NoneType
|
708,745
|
pathvalidate._filepath
|
validate
| null |
def validate(self, value: PathType) -> None:
    """Validate *value* as a file path for the target platform.

    Raises:
        ValidationError: If the path is malformed, too long/short, contains
            invalid characters, or matches a reserved keyword.
    """
    validate_pathtype(value, allow_whitespaces=not self._is_windows(include_universal=True))
    self.validate_abspath(value)

    _drive, tail = self.__split_drive(value)
    if not tail:
        # A bare drive (e.g. "C:") has nothing further to validate.
        return

    unicode_filepath = to_str(tail)

    # Length limits are measured in bytes of the filesystem encoding.
    byte_ct = len(unicode_filepath.encode(self._fs_encoding))
    err_kwargs = {
        ErrorAttrKey.REASON: ErrorReason.INVALID_LENGTH,
        ErrorAttrKey.PLATFORM: self.platform,
        ErrorAttrKey.FS_ENCODING: self._fs_encoding,
        ErrorAttrKey.BYTE_COUNT: byte_ct,
    }
    if byte_ct > self.max_len:
        raise ValidationError(
            [
                f"file path is too long: expected<={self.max_len:d} bytes, actual={byte_ct:d} bytes"
            ],
            **err_kwargs,
        )
    if byte_ct < self.min_len:
        raise ValidationError(
            [
                "file path is too short: expected>={:d} bytes, actual={:d} bytes".format(
                    self.min_len, byte_ct
                )
            ],
            **err_kwargs,
        )

    self._validate_reserved_keywords(unicode_filepath)

    # Use "/" uniformly so components can be split on one separator.
    unicode_filepath = unicode_filepath.replace("\\", "/")
    for entry in unicode_filepath.split("/"):
        if not entry or entry in (".", ".."):
            continue
        # Components must not be reserved file names.
        self.__fname_validator._validate_reserved_keywords(entry)

    if self._is_windows(include_universal=True):
        self.__validate_win_filepath(unicode_filepath)
    else:
        self.__validate_unix_filepath(unicode_filepath)
|
(self, value: ~PathType) -> NoneType
|
708,746
|
pathvalidate._filepath
|
validate_abspath
| null |
def validate_abspath(self, value: PathType) -> None:
    """Reject absolute paths whose style does not match the target platform.

    Raises:
        ValidationError: With reason ``MALFORMED_ABS_PATH`` when *value* is an
            absolute path of a style (POSIX/NT) invalid for the platform.
    """
    is_posix_abs = posixpath.isabs(value)
    is_nt_abs = ntpath.isabs(value)
    err_object = ValidationError(
        description=(
            "an invalid absolute file path ({}) for the platform ({}).".format(
                value, self.platform.value
            )
            + " to avoid the error, specify an appropriate platform corresponding to"
            + " the path format or 'auto'."
        ),
        platform=self.platform,
        reason=ErrorReason.MALFORMED_ABS_PATH,
    )

    if any([self._is_windows() and is_nt_abs, self._is_linux() and is_posix_abs]):
        # Absolute-path style matches the platform: valid.
        return

    if self._is_universal() and any([is_posix_abs, is_nt_abs]):
        # BUG FIX: this error was constructed but never raised, so the
        # specific "platform-independent" message was dead code and the
        # generic error below fired instead.
        raise ValidationError(
            description=(
                ("POSIX style" if is_posix_abs else "NT style")
                + " absolute file path found. expected a platform-independent file path."
            ),
            platform=self.platform,
            reason=ErrorReason.MALFORMED_ABS_PATH,
        )

    if self._is_windows(include_universal=True) and is_posix_abs:
        raise err_object

    drive, _tail = ntpath.splitdrive(value)
    if not self._is_windows() and drive and is_nt_abs:
        raise err_object
|
(self, value: ~PathType) -> NoneType
|
708,747
|
pathvalidate.error
|
InvalidCharError
|
Exception raised when includes invalid character(s) within a string.
|
class InvalidCharError(ValidationError):
    """Raised when a string contains one or more invalid characters."""

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # The reason is fixed for this class; overwrite anything the caller passed.
        kwargs.update({ErrorAttrKey.REASON: ErrorReason.INVALID_CHARACTER})
        super().__init__(args, **kwargs)
|
(*args, **kwargs) -> None
|
708,748
|
pathvalidate.error
|
__init__
| null |
def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
    """Force the INVALID_CHARACTER reason, then delegate to the base class."""
    kwargs.update({ErrorAttrKey.REASON: ErrorReason.INVALID_CHARACTER})
    # Positional args are forwarded as a single tuple, matching the
    # convention of the sibling error classes in this module.
    super().__init__(args, **kwargs)
|
(self, *args, **kwargs) -> NoneType
|
708,752
|
pathvalidate.error
|
InvalidReservedNameError
|
[Deprecated]
Exception raised when a string matched a reserved name.
Moreover, the reserved name is invalid as a name.
|
class InvalidReservedNameError(ReservedNameError):
    """[Deprecated]
    Raised when a string matches a reserved name that is also unusable as a name.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # Mark the matched reserved name as not reusable.
        kwargs.update({ErrorAttrKey.REUSABLE_NAME: False})
        super().__init__(args, **kwargs)
|
(*args, **kwargs) -> None
|
708,753
|
pathvalidate.error
|
__init__
| null |
def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
    """Mark the reserved name as not reusable, then delegate to the base class."""
    kwargs.update({ErrorAttrKey.REUSABLE_NAME: False})
    super().__init__(args, **kwargs)
|
(self, *args, **kwargs) -> NoneType
|
708,757
|
pathvalidate.error
|
NullNameError
|
[Deprecated]
Exception raised when a name is empty.
|
class NullNameError(ValidationError):
    """[Deprecated]
    Raised when a name is empty.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        # The reason is fixed for this class.
        kwargs.update({ErrorAttrKey.REASON: ErrorReason.NULL_NAME})
        super().__init__(args, **kwargs)
|
(*args, **kwargs) -> None
|
708,758
|
pathvalidate.error
|
__init__
| null |
def __init__(self, *args, **kwargs) -> None:  # type: ignore
    """Force the NULL_NAME reason, then delegate to the base class."""
    kwargs.update({ErrorAttrKey.REASON: ErrorReason.NULL_NAME})
    super().__init__(args, **kwargs)
|
(self, *args, **kwargs) -> NoneType
|
708,762
|
pathvalidate._const
|
Platform
|
Platform specifier enumeration.
|
class Platform(enum.Enum):
    """
    Platform specifier enumeration.
    """

    #: POSIX compatible platform.
    POSIX = "POSIX"

    #: platform independent. note that absolute paths cannot specify this.
    UNIVERSAL = "universal"

    #: Linux platforms.
    LINUX = "Linux"

    #: Windows platforms.
    WINDOWS = "Windows"

    #: macOS platforms.
    MACOS = "macOS"
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
708,763
|
pathvalidate.error
|
ReservedNameError
|
Exception raised when a string matched a reserved name.
|
class ReservedNameError(ValidationError):
    """Raised when a string matches a reserved name."""

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # The reason is fixed for this class; overwrite anything the caller passed.
        kwargs.update({ErrorAttrKey.REASON: ErrorReason.RESERVED_NAME})
        super().__init__(args, **kwargs)
|
(*args, **kwargs) -> None
|
708,764
|
pathvalidate.error
|
__init__
| null |
def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
    """Force the RESERVED_NAME reason, then delegate to the base class."""
    kwargs.update({ErrorAttrKey.REASON: ErrorReason.RESERVED_NAME})
    super().__init__(args, **kwargs)
|
(self, *args, **kwargs) -> NoneType
|
708,768
|
pathvalidate.error
|
ValidReservedNameError
|
[Deprecated]
Exception raised when a string matched a reserved name.
However, it can be used as a name.
|
class ValidReservedNameError(ReservedNameError):
    """[Deprecated]
    Raised when a string matches a reserved name that can still be used as a name.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # Mark the matched reserved name as reusable.
        kwargs.update({ErrorAttrKey.REUSABLE_NAME: True})
        super().__init__(args, **kwargs)
|
(*args, **kwargs) -> None
|
708,769
|
pathvalidate.error
|
__init__
| null |
def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
    """Mark the reserved name as reusable, then delegate to the base class."""
    kwargs.update({ErrorAttrKey.REUSABLE_NAME: True})
    super().__init__(args, **kwargs)
|
(self, *args, **kwargs) -> NoneType
|
708,788
|
pathvalidate._filename
|
is_valid_filename
|
Check whether the ``filename`` is a valid name or not.
Args:
filename:
A filename to be checked.
platform:
Target platform name of the filename.
Example:
:ref:`example-is-valid-filename`
See Also:
:py:func:`.validate_filename()`
|
def is_valid_filename(
    filename: PathType,
    platform: Optional[PlatformType] = None,
    min_len: int = DEFAULT_MIN_LEN,
    max_len: Optional[int] = None,
    fs_encoding: Optional[str] = None,
    check_reserved: bool = True,
    additional_reserved_names: Optional[Sequence[str]] = None,
) -> bool:
    """Check whether the ``filename`` is a valid name or not.

    Args:
        filename:
            A filename to be checked.
        platform:
            Target platform name of the filename.

    Example:
        :ref:`example-is-valid-filename`

    See Also:
        :py:func:`.validate_filename()`
    """
    validator = FileNameValidator(
        platform=platform,
        min_len=min_len,
        max_len=max_len if max_len is not None else -1,
        fs_encoding=fs_encoding,
        check_reserved=check_reserved,
        additional_reserved_names=additional_reserved_names,
    )
    return validator.is_valid(filename)
|
(filename: ~PathType, platform: Optional[~PlatformType] = None, min_len: int = 1, max_len: Optional[int] = None, fs_encoding: Optional[str] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> bool
|
708,789
|
pathvalidate._filepath
|
is_valid_filepath
|
Check whether the ``file_path`` is a valid name or not.
Args:
file_path:
A filepath to be checked.
platform:
Target platform name of the file path.
Example:
:ref:`example-is-valid-filepath`
See Also:
:py:func:`.validate_filepath()`
|
def is_valid_filepath(
    file_path: PathType,
    platform: Optional[PlatformType] = None,
    min_len: int = DEFAULT_MIN_LEN,
    max_len: Optional[int] = None,
    fs_encoding: Optional[str] = None,
    check_reserved: bool = True,
    additional_reserved_names: Optional[Sequence[str]] = None,
) -> bool:
    """Check whether the ``file_path`` is a valid name or not.

    Args:
        file_path:
            A filepath to be checked.
        platform:
            Target platform name of the file path.

    Example:
        :ref:`example-is-valid-filepath`

    See Also:
        :py:func:`.validate_filepath()`
    """
    validator = FilePathValidator(
        platform=platform,
        min_len=min_len,
        max_len=max_len if max_len is not None else -1,
        fs_encoding=fs_encoding,
        check_reserved=check_reserved,
        additional_reserved_names=additional_reserved_names,
    )
    return validator.is_valid(file_path)
|
(file_path: ~PathType, platform: Optional[~PlatformType] = None, min_len: int = 1, max_len: Optional[int] = None, fs_encoding: Optional[str] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> bool
|
708,790
|
pathvalidate._common
|
normalize_platform
| null |
def normalize_platform(name: Optional[PlatformType]) -> Platform:
    """Map a platform name (or Platform instance) to a :py:class:`Platform` member.

    ``"auto"`` resolves to the running system; unknown or empty names fall
    back to :py:attr:`Platform.UNIVERSAL`.
    """
    if isinstance(name, Platform):
        return name
    if not name:
        return Platform.UNIVERSAL

    normalized = name.strip().casefold()
    if normalized == "posix":
        return Platform.POSIX

    if normalized == "auto":
        # Resolve against the system we are actually running on.
        normalized = platform.system().casefold()

    if normalized == "linux":
        return Platform.LINUX
    if normalized.startswith("win"):
        return Platform.WINDOWS
    if normalized in ("mac", "macos", "darwin"):
        return Platform.MACOS

    return Platform.UNIVERSAL
|
(name: Optional[~PlatformType]) -> pathvalidate._const.Platform
|
708,791
|
pathvalidate._common
|
replace_ansi_escape
| null |
def replace_ansi_escape(text: str, replacement_text: str = "") -> str:
    """Substitute matches of the module's ANSI-escape pattern in ``text``.

    Raises:
        TypeError: If ``text`` is not a string.
    """
    try:
        replaced = __RE_ANSI_ESCAPE.sub(replacement_text, text)
    except (TypeError, AttributeError):
        raise TypeError("text must be a string")
    return replaced
|
(text: str, replacement_text: str = '') -> str
|
708,792
|
pathvalidate._symbol
|
replace_symbol
|
Replace all of the symbols in the ``text``.
Args:
text:
Input text.
replacement_text:
Replacement text.
exclude_symbols:
Symbols that exclude from the replacement.
is_replace_consecutive_chars:
If |True|, replace consecutive multiple ``replacement_text`` characters
to a single character.
is_strip:
If |True|, strip ``replacement_text`` from the beginning/end of the replacement text.
Returns:
A replacement string.
Example:
:ref:`example-sanitize-symbol`
|
def replace_symbol(
    text: str,
    replacement_text: str = "",
    exclude_symbols: Sequence[str] = [],
    is_replace_consecutive_chars: bool = False,
    is_strip: bool = False,
) -> str:
    """
    Replace all of the symbols in the ``text``.

    Args:
        text:
            Input text.
        replacement_text:
            Replacement text.
        exclude_symbols:
            Symbols that exclude from the replacement.
        is_replace_consecutive_chars:
            If |True|, collapse runs of consecutive ``replacement_text``
            characters into a single character.
        is_strip:
            If |True|, strip ``replacement_text`` from both ends of the result.

    Returns:
        A replacement string.

    Example:
        :ref:`example-sanitize-symbol`
    """
    if exclude_symbols:
        # Build a character class of every symbol except the excluded ones.
        targets = set(ascii_symbols + unprintable_ascii_chars) - set(exclude_symbols)
        escaped = re.escape("".join(targets))
        regexp = re.compile(f"[{escaped}]", re.UNICODE)
    else:
        regexp = __RE_SYMBOL

    try:
        replaced = regexp.sub(replacement_text, to_str(text))
    except TypeError:
        raise TypeError("text must be a string")

    if not replacement_text:
        return replaced

    if is_replace_consecutive_chars:
        replaced = re.sub(f"{re.escape(replacement_text)}+", replacement_text, replaced)
    if is_strip:
        replaced = replaced.strip(replacement_text)

    return replaced
|
(text: str, replacement_text: str = '', exclude_symbols: Sequence[str] = [], is_replace_consecutive_chars: bool = False, is_strip: bool = False) -> str
|
708,793
|
pathvalidate._common
|
replace_unprintable_char
| null |
def replace_unprintable_char(text: str, replacement_text: str = "") -> str:
    """Substitute matches of the module's unprintable-character pattern in ``text``.

    Raises:
        TypeError: If ``text`` is not a string.
    """
    try:
        replaced = __RE_UNPRINTABLE_CHARS.sub(replacement_text, text)
    except (TypeError, AttributeError):
        raise TypeError("text must be a string")
    return replaced
|
(text: str, replacement_text: str = '') -> str
|
708,794
|
pathvalidate._filename
|
sanitize_filename
|
Make a valid filename from a string.
To make a valid filename, the function does the following:
- Replace invalid characters as file names included in the ``filename``
with the ``replacement_text``. Invalid characters are:
- unprintable characters
- |invalid_filename_chars|
- for Windows (or universal) only: |invalid_win_filename_chars|
- Replace a value if a sanitized value is a reserved name by operating systems
with a specified handler by ``reserved_name_handler``.
Args:
filename: Filename to sanitize.
replacement_text:
Replacement text for invalid characters. Defaults to ``""``.
platform:
Target platform name of the filename.
.. include:: platform.txt
max_len:
Maximum byte length of the ``filename``.
Truncate the name length if the ``filename`` length exceeds this value.
Defaults to ``255``.
fs_encoding:
Filesystem encoding that used to calculate the byte length of the filename.
If |None|, get the value from the execution environment.
check_reserved:
[Deprecated] Use 'reserved_name_handler' instead.
null_value_handler:
Function called when a value after sanitization is an empty string.
You can specify predefined handlers:
- :py:func:`~.handler.NullValueHandler.return_null_string`
- :py:func:`~.handler.NullValueHandler.return_timestamp`
- :py:func:`~.handler.raise_error`
Defaults to :py:func:`.handler.NullValueHandler.return_null_string` that just return ``""``.
reserved_name_handler:
Function called when a value after sanitization is a reserved name.
You can specify predefined handlers:
- :py:meth:`~.handler.ReservedNameHandler.add_leading_underscore`
- :py:meth:`~.handler.ReservedNameHandler.add_trailing_underscore`
- :py:meth:`~.handler.ReservedNameHandler.as_is`
- :py:func:`~.handler.raise_error`
Defaults to :py:func:`.handler.add_trailing_underscore`.
additional_reserved_names:
Additional reserved names to sanitize.
Case insensitive.
validate_after_sanitize:
Execute validation after sanitization to the file name.
Returns:
Same type as the ``filename`` (str or PathLike object):
Sanitized filename.
Raises:
ValueError:
If the ``filename`` is an invalid filename.
Example:
:ref:`example-sanitize-filename`
|
def sanitize_filename(
    filename: PathType,
    replacement_text: str = "",
    platform: Optional[PlatformType] = None,
    max_len: Optional[int] = _DEFAULT_MAX_FILENAME_LEN,
    fs_encoding: Optional[str] = None,
    check_reserved: Optional[bool] = None,
    null_value_handler: Optional[ValidationErrorHandler] = None,
    reserved_name_handler: Optional[ValidationErrorHandler] = None,
    additional_reserved_names: Optional[Sequence[str]] = None,
    validate_after_sanitize: bool = False,
) -> PathType:
    """Make a valid filename from a string.

    Invalid characters (unprintable characters, |invalid_filename_chars|, and
    on Windows/universal platforms |invalid_win_filename_chars|) are replaced
    with ``replacement_text``; values that sanitize to a reserved name are
    handled by ``reserved_name_handler``.

    Args:
        filename: Filename to sanitize.
        replacement_text:
            Replacement text for invalid characters. Defaults to ``""``.
        platform:
            Target platform name of the filename.

            .. include:: platform.txt
        max_len:
            Maximum byte length of the ``filename``; longer names are
            truncated. Defaults to ``255``.
        fs_encoding:
            Filesystem encoding used to calculate the byte length of the
            filename. If |None|, get the value from the execution environment.
        check_reserved:
            [Deprecated] Use 'reserved_name_handler' instead.
        null_value_handler:
            Function called when a value after sanitization is an empty
            string. Defaults to
            :py:func:`.handler.NullValueHandler.return_null_string`.
        reserved_name_handler:
            Function called when a value after sanitization is a reserved
            name. Defaults to :py:func:`.handler.add_trailing_underscore`.
        additional_reserved_names:
            Additional reserved names to sanitize. Case insensitive.
        validate_after_sanitize:
            Execute validation after sanitization to the file name.

    Returns:
        Same type as the ``filename`` (str or PathLike object):
            Sanitized filename.

    Raises:
        ValueError:
            If the ``filename`` is an invalid filename.

    Example:
        :ref:`example-sanitize-filename`
    """
    if check_reserved is not None:
        warnings.warn(
            "'check_reserved' is deprecated. Use 'reserved_name_handler' instead.",
            DeprecationWarning,
        )

        if check_reserved is False:
            reserved_name_handler = ReservedNameHandler.as_is

    sanitizer = FileNameSanitizer(
        platform=platform,
        max_len=max_len if max_len is not None else -1,
        fs_encoding=fs_encoding,
        null_value_handler=null_value_handler,
        reserved_name_handler=reserved_name_handler,
        additional_reserved_names=additional_reserved_names,
        validate_after_sanitize=validate_after_sanitize,
    )
    return sanitizer.sanitize(filename, replacement_text)
|
(filename: ~PathType, replacement_text: str = '', platform: Optional[~PlatformType] = None, max_len: Optional[int] = 255, fs_encoding: Optional[str] = None, check_reserved: Optional[bool] = None, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, validate_after_sanitize: bool = False) -> ~PathType
|
708,795
|
pathvalidate._filepath
|
sanitize_filepath
|
Make a valid file path from a string.
To make a valid file path, the function does the following:
- Replace invalid characters for a file path within the ``file_path``
with the ``replacement_text``. Invalid characters are as follows:
- unprintable characters
- |invalid_file_path_chars|
- for Windows (or universal) only: |invalid_win_file_path_chars|
- Replace a value if a sanitized value is a reserved name by operating systems
with a specified handler by ``reserved_name_handler``.
Args:
file_path:
File path to sanitize.
replacement_text:
Replacement text for invalid characters.
Defaults to ``""``.
platform:
Target platform name of the file path.
.. include:: platform.txt
max_len:
Maximum byte length of the file path.
Truncate the path if the value length exceeds the `max_len`.
If the value is |None| or minus, ``max_len`` will be automatically determined by the ``platform``:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
- ``universal``: 260
fs_encoding:
Filesystem encoding that used to calculate the byte length of the file path.
If |None|, get the value from the execution environment.
check_reserved:
[Deprecated] Use 'reserved_name_handler' instead.
null_value_handler:
Function called when a value after sanitization is an empty string.
You can specify predefined handlers:
- :py:func:`.handler.NullValueHandler.return_null_string`
- :py:func:`.handler.NullValueHandler.return_timestamp`
- :py:func:`.handler.raise_error`
Defaults to :py:func:`.handler.NullValueHandler.return_null_string` that just return ``""``.
reserved_name_handler:
Function called when a value after sanitization is one of the reserved names.
You can specify predefined handlers:
- :py:meth:`~.handler.ReservedNameHandler.add_leading_underscore`
- :py:meth:`~.handler.ReservedNameHandler.add_trailing_underscore`
- :py:meth:`~.handler.ReservedNameHandler.as_is`
- :py:func:`~.handler.raise_error`
Defaults to :py:func:`.handler.add_trailing_underscore`.
additional_reserved_names:
Additional reserved names to sanitize.
Case insensitive.
normalize:
If |True|, normalize the file path.
validate_after_sanitize:
Execute validation after sanitization to the file path.
Returns:
Same type as the argument (str or PathLike object):
Sanitized filepath.
Raises:
ValueError:
If the ``file_path`` is an invalid file path.
Example:
:ref:`example-sanitize-file-path`
|
def sanitize_filepath(
    file_path: PathType,
    replacement_text: str = "",
    platform: Optional[PlatformType] = None,
    max_len: Optional[int] = None,
    fs_encoding: Optional[str] = None,
    check_reserved: Optional[bool] = None,
    null_value_handler: Optional[ValidationErrorHandler] = None,
    reserved_name_handler: Optional[ValidationErrorHandler] = None,
    additional_reserved_names: Optional[Sequence[str]] = None,
    normalize: bool = True,
    validate_after_sanitize: bool = False,
) -> PathType:
    """Make a valid file path from a string.

    Invalid characters (unprintable characters, |invalid_file_path_chars|, and
    on Windows/universal platforms |invalid_win_file_path_chars|) are replaced
    with ``replacement_text``; values that sanitize to a reserved name are
    handled by ``reserved_name_handler``.

    Args:
        file_path:
            File path to sanitize.
        replacement_text:
            Replacement text for invalid characters. Defaults to ``""``.
        platform:
            Target platform name of the file path.

            .. include:: platform.txt
        max_len:
            Maximum byte length of the file path; longer paths are truncated.
            If the value is |None| or negative, ``max_len`` is determined by
            the ``platform``:

                - ``Linux``: 4096
                - ``macOS``: 1024
                - ``Windows``: 260
                - ``universal``: 260
        fs_encoding:
            Filesystem encoding used to calculate the byte length of the file
            path. If |None|, get the value from the execution environment.
        check_reserved:
            [Deprecated] Use 'reserved_name_handler' instead.
        null_value_handler:
            Function called when a value after sanitization is an empty
            string. Defaults to
            :py:func:`.handler.NullValueHandler.return_null_string`.
        reserved_name_handler:
            Function called when a value after sanitization is one of the
            reserved names. Defaults to
            :py:func:`.handler.add_trailing_underscore`.
        additional_reserved_names:
            Additional reserved names to sanitize. Case insensitive.
        normalize:
            If |True|, normalize the file path.
        validate_after_sanitize:
            Execute validation after sanitization to the file path.

    Returns:
        Same type as the argument (str or PathLike object):
            Sanitized filepath.

    Raises:
        ValueError:
            If the ``file_path`` is an invalid file path.

    Example:
        :ref:`example-sanitize-file-path`
    """
    if check_reserved is not None:
        warnings.warn(
            "'check_reserved' is deprecated. Use 'reserved_name_handler' instead.",
            DeprecationWarning,
        )

        if check_reserved is False:
            reserved_name_handler = ReservedNameHandler.as_is

    sanitizer = FilePathSanitizer(
        platform=platform,
        max_len=max_len if max_len is not None else -1,
        fs_encoding=fs_encoding,
        normalize=normalize,
        null_value_handler=null_value_handler,
        reserved_name_handler=reserved_name_handler,
        additional_reserved_names=additional_reserved_names,
        validate_after_sanitize=validate_after_sanitize,
    )
    return sanitizer.sanitize(file_path, replacement_text)
|
(file_path: ~PathType, replacement_text: str = '', platform: Optional[~PlatformType] = None, max_len: Optional[int] = None, fs_encoding: Optional[str] = None, check_reserved: Optional[bool] = None, null_value_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, reserved_name_handler: Optional[Callable[[pathvalidate.error.ValidationError], str]] = None, additional_reserved_names: Optional[Sequence[str]] = None, normalize: bool = True, validate_after_sanitize: bool = False) -> ~PathType
|
708,796
|
pathvalidate._ltsv
|
sanitize_ltsv_label
|
Replace all of the symbols in text.
:param label: Input text.
:param replacement_text: Replacement text.
:return: A replacement string.
:rtype: str
|
def sanitize_ltsv_label(label: str, replacement_text: str = "") -> str:
    """Strip characters that are invalid in an LTSV label.

    :param label: Input text.
    :param replacement_text: Text substituted for each invalid character.
    :return: A replacement string.
    :rtype: str
    """
    validate_pathtype(label, allow_whitespaces=False)
    normalized = to_str(label)
    return __RE_INVALID_LTSV_LABEL.sub(replacement_text, normalized)
|
(label: str, replacement_text: str = '') -> str
|
708,798
|
pathvalidate._filepath
|
validate_filepath
|
Verifying whether the ``file_path`` is a valid file path or not.
Args:
file_path (PathType):
File path to be validated.
platform (Optional[PlatformType], optional):
Target platform name of the file path.
.. include:: platform.txt
min_len (int, optional):
Minimum byte length of the ``file_path``. The value must be greater or equal to one.
Defaults to ``1``.
max_len (Optional[int], optional):
Maximum byte length of the ``file_path``. If the value is |None| or minus,
automatically determined by the ``platform``:
- ``Linux``: 4096
- ``macOS``: 1024
- ``Windows``: 260
- ``universal``: 260
fs_encoding (Optional[str], optional):
Filesystem encoding that is used to calculate the byte length of the file path.
If |None|, get the value from the execution environment.
check_reserved (bool, optional):
If |True|, check reserved names of the ``platform``.
Defaults to |True|.
additional_reserved_names (Optional[Sequence[str]], optional):
Additional reserved names to check.
Raises:
ValidationError (ErrorReason.INVALID_CHARACTER):
If the ``file_path`` includes invalid char(s):
|invalid_file_path_chars|.
The following characters are also invalid for Windows platforms:
|invalid_win_file_path_chars|
ValidationError (ErrorReason.INVALID_LENGTH):
If the ``file_path`` is longer than ``max_len`` characters.
ValidationError:
If ``file_path`` include invalid values.
Example:
:ref:`example-validate-file-path`
See Also:
`Naming Files, Paths, and Namespaces - Win32 apps | Microsoft Docs
<https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file>`__
|
def validate_filepath(
    file_path: PathType,
    platform: Optional[PlatformType] = None,
    min_len: int = DEFAULT_MIN_LEN,
    max_len: Optional[int] = None,
    fs_encoding: Optional[str] = None,
    check_reserved: bool = True,
    additional_reserved_names: Optional[Sequence[str]] = None,
) -> None:
    """Verify whether ``file_path`` is a valid file path or not.

    Args:
        file_path (PathType):
            File path to be validated.
        platform (Optional[PlatformType], optional):
            Target platform name of the file path.

            .. include:: platform.txt
        min_len (int, optional):
            Minimum byte length of the ``file_path``. Must be greater than
            or equal to one. Defaults to ``1``.
        max_len (Optional[int], optional):
            Maximum byte length of the ``file_path``. If the value is |None|
            or negative, it is determined automatically by the ``platform``:

                - ``Linux``: 4096
                - ``macOS``: 1024
                - ``Windows``: 260
                - ``universal``: 260
        fs_encoding (Optional[str], optional):
            Filesystem encoding used to calculate the byte length of the
            file path. If |None|, taken from the execution environment.
        check_reserved (bool, optional):
            If |True|, check reserved names of the ``platform``.
            Defaults to |True|.
        additional_reserved_names (Optional[Sequence[str]], optional):
            Additional reserved names to check.

    Raises:
        ValidationError (ErrorReason.INVALID_CHARACTER):
            If the ``file_path`` includes invalid char(s):
            |invalid_file_path_chars|.
            The following characters are also invalid for Windows platforms:
            |invalid_win_file_path_chars|
        ValidationError (ErrorReason.INVALID_LENGTH):
            If the ``file_path`` is longer than ``max_len`` characters.
        ValidationError:
            If ``file_path`` includes invalid values.

    Example:
        :ref:`example-validate-file-path`

    See Also:
        `Naming Files, Paths, and Namespaces - Win32 apps | Microsoft Docs
        <https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file>`__
    """
    # -1 signals "auto-determine from platform" to the validator.
    effective_max_len = -1 if max_len is None else max_len
    validator = FilePathValidator(
        platform=platform,
        min_len=min_len,
        max_len=effective_max_len,
        fs_encoding=fs_encoding,
        check_reserved=check_reserved,
        additional_reserved_names=additional_reserved_names,
    )
    validator.validate(file_path)
|
(file_path: ~PathType, platform: Optional[~PlatformType] = None, min_len: int = 1, max_len: Optional[int] = None, fs_encoding: Optional[str] = None, check_reserved: bool = True, additional_reserved_names: Optional[Sequence[str]] = None) -> NoneType
|
708,799
|
pathvalidate._ltsv
|
validate_ltsv_label
|
Verifying whether ``label`` is a valid
`Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label or not.
:param label: Label to validate.
:raises pathvalidate.ValidationError:
If invalid character(s) found in the ``label`` for a LTSV format label.
|
def validate_ltsv_label(label: str) -> None:
    """Verify that ``label`` is a valid
    `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__ label.

    :param label: Label to validate.
    :raises pathvalidate.ValidationError:
        If invalid character(s) found in the ``label`` for a LTSV format label.
    """
    validate_pathtype(label, allow_whitespaces=False)

    invalid_chars = __RE_INVALID_LTSV_LABEL.findall(to_str(label))
    if invalid_chars:
        raise InvalidCharError(f"invalid character found for a LTSV format label: {invalid_chars}")
|
(label: str) -> NoneType
|
708,800
|
pathvalidate._common
|
validate_pathtype
| null |
def validate_pathtype(
    text: PathType, allow_whitespaces: bool = False, error_msg: Optional[str] = None
) -> None:
    """Ensure *text* is a usable path value: a non-empty string or a PurePath.

    NOTE(review): ``error_msg`` is accepted but never used in this body —
    confirm whether callers expect it to customize the raised error.
    """
    from .error import ErrorReason, ValidationError

    # Path objects and non-empty strings are always acceptable.
    if isinstance(text, PurePath) or _is_not_null_string(text):
        return

    # Whitespace-only values pass only when explicitly allowed.
    if allow_whitespaces and _re_whitespaces.search(str(text)):
        return

    if is_null_string(text):
        raise ValidationError(reason=ErrorReason.NULL_NAME)

    raise TypeError(f"text must be a string: actual={type(text)}")
|
(text: ~PathType, allow_whitespaces: bool = False, error_msg: Optional[str] = None) -> NoneType
|
708,801
|
pathvalidate._symbol
|
validate_symbol
|
Verifying whether symbol(s) included in the ``text`` or not.
Args:
text:
Input text to validate.
Raises:
ValidationError (ErrorReason.INVALID_CHARACTER):
If symbol(s) included in the ``text``.
|
def validate_symbol(text: str) -> None:
    """Verify that no symbol characters are included in ``text``.

    Args:
        text:
            Input text to validate.

    Raises:
        ValidationError (ErrorReason.INVALID_CHARACTER):
            If symbol(s) included in the ``text``.
    """
    found_symbols = __RE_SYMBOL.findall(to_str(text))
    if found_symbols:
        raise InvalidCharError(f"invalid symbols found: {found_symbols}")
|
(text: str) -> NoneType
|
708,802
|
pathvalidate._common
|
validate_unprintable_char
| null |
def validate_unprintable_char(text: str) -> None:
    """Raise InvalidCharError if ``text`` contains any unprintable character."""
    from .error import InvalidCharError

    unprintable = __RE_UNPRINTABLE_CHARS.findall(to_str(text))
    if unprintable:
        raise InvalidCharError(f"unprintable character found: {unprintable}")
|
(text: str) -> NoneType
|
708,811
|
sweetviz.dataframe_report
|
DataframeReport
| null |
class DataframeReport:
    """Full sweetviz analysis of a source dataframe (optionally against a
    compare dataframe): per-feature summaries, an optional target feature,
    and pairwise association data. Instances render HTML via the show_*
    methods.
    """
    def __init__(self,
                 source: Union[pd.DataFrame, Tuple[pd.DataFrame, str]],
                 target_feature_name: str = None,
                 compare: Union[pd.DataFrame, Tuple[pd.DataFrame, str]] = None,
                 pairwise_analysis: str = 'auto',
                 fc: FeatureConfig = None,
                 verbosity: str = 'default'): # verbosity: default (full), full, progress_only, off
        """Analyze *source* (and optionally *compare*) feature by feature.

        ``source``/``compare`` may be a DataFrame or a ``[dataframe, "Name"]``
        pair. ``pairwise_analysis`` is one of "on"/"auto"/"off"; in "auto"
        mode pairwise processing is aborted with a warning above a configured
        feature-count threshold (pair count grows quadratically).
        Raises ValueError/KeyError on invalid parameter combinations.
        """
        # Parse analysis parameter
        pairwise_analysis = pairwise_analysis.lower()
        if pairwise_analysis not in ["on", "auto", "off"]:
            raise ValueError('"pairwise_analysis" parameter should be one of: "on", "auto", "off"')
        # Parse verbosity parameter
        if verbosity == "default":
            verbosity = config["General"]["default_verbosity"]
        if verbosity not in ["default", "full", "progress_only", "off"]:
            raise ValueError('"verbosity" parameter should be one of: "default", "full", "progress_only", "off"')
        self.verbosity_level = verbosity
        sv_html.load_layout_globals_from_config()
        self._jupyter_html = ""
        self._page_html = ""
        self._features = dict()
        self.compare_name = None
        self._target = None
        self.test_mode = False
        # Names of feature pairs whose correlation hit an edge case (reported later).
        self.corr_warning = list()
        if fc is None:
            fc = FeatureConfig()
        # Associations: _associations[FEATURE][GIVES INFORMATION ABOUT THIS FEATURE]
        self._associations = dict()
        self._associations_compare = dict()
        self._association_graphs = dict()
        self._association_graphs_compare = dict()
        # Handle source and compare dataframes and names
        if type(source) == pd.DataFrame:
            source_df = source
            self.source_name = "DataFrame"
        elif type(source) == list or type(source) == tuple:
            if len(source) != 2:
                raise ValueError('"source" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
            source_df = source[0]
            self.source_name = source[1]
        else:
            raise ValueError('"source" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
        if len(su.get_duplicate_cols(source_df)) > 0:
            raise ValueError('Duplicate column names detected in "source"; this is not supported.')
        # NEW (12-14-2020): Rename indices that use the reserved name "index"
        # From pandas-profiling:
        # If the DataFrame contains a column or index named `index`, this will produce errors. We rename the {index,column} to be `df_index`.
        if 'index' in source_df.columns:
            source_df = source_df.rename(columns={"index": "df_index"})
            if target_feature_name == 'index':
                target_feature_name = 'df_index'
        all_source_names = [cur_name for cur_name, cur_series in source_df.items()]
        if compare is None:
            compare_df = None
            self.compare_name = None
            all_compare_names = list()
        elif type(compare) == pd.DataFrame:
            compare_df = compare
            if 'index' in compare_df.columns:
                compare_df = compare_df.rename(columns={"index": "df_index"})
            self.compare_name = "Compared"
            all_compare_names = [cur_name for cur_name, cur_series in compare_df.items()]
        elif type(compare) == list or type(compare) == tuple:
            if len(compare) != 2:
                raise ValueError('"compare" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
            compare_df = compare[0]
            if 'index' in compare_df.columns:
                compare_df = compare_df.rename(columns={"index": "df_index"})
            self.compare_name = compare[1]
            all_compare_names = [cur_name for cur_name, cur_series in compare_df.items()]
        else:
            raise ValueError('"compare" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
        # Validate some params
        if compare_df is not None and len(su.get_duplicate_cols(compare_df)) > 0:
            raise ValueError('Duplicate column names detected in "compare"; this is not supported.')
        if target_feature_name in fc.skip:
            raise ValueError(f'"{target_feature_name}" was also specified as "skip". Target cannot be skipped.')
        for key in fc.get_all_mentioned_features():
            if key not in all_source_names:
                raise ValueError(f'"{key}" was specified in "feature_config" but is not found in source dataframe (watch case-sensitivity?).')
        # Find Features and Target (FILTER SKIPPED)
        filtered_series_names_in_source = [cur_name for cur_name, cur_series in source_df.items()
                                           if cur_name not in fc.skip]
        for skipped in fc.skip:
            if skipped not in all_source_names and skipped not in all_compare_names:
                raise ValueError(f'"{skipped}" was marked as "skip" but is not in any provided dataframe (watch case-sensitivity?).')
        # Progress bar setup
        ratio_progress_of_df_summary_vs_feature = 1.0
        number_features = len(filtered_series_names_in_source)
        # NOTE(review): exponential_checks appears unused below (the pair
        # count is recomputed inline in the warning message) — confirm.
        exponential_checks = number_features * number_features
        # NOTE(review): the trailing conditional always contributes 0 — confirm intent.
        progress_chunks = ratio_progress_of_df_summary_vs_feature \
            + number_features + (0 if target_feature_name is not None else 0)
        class DummyFile(object):
            # Sink used to silence tqdm output when verbosity is off.
            def write(self, x):
                pass # Do nothing
            def flush(self):
                pass # Do nothing
        if self.verbosity_level in ('full', 'progress_only'):
            self.progress_bar = tqdm(total=progress_chunks, bar_format= \
                '{desc:45}|{bar}| [{percentage:3.0f}%]   {elapsed} -> ({remaining} left)', \
                ascii=False, dynamic_ncols=True, position=0, leave= True)
        else:
            # No progress bar, use dummy file
            self.progress_bar = tqdm(total=progress_chunks, bar_format= \
                '{desc:45}|{bar}| [{percentage:3.0f}%]   {elapsed} -> ({remaining} left)', \
                ascii=False, dynamic_ncols=True, position=0, leave= True, file=DummyFile())
        # Summarize dataframe
        self.progress_bar.set_description_str("[Summarizing dataframe]")
        self.summary_source = dict()
        self.summarize_dataframe(source_df, self.source_name, self.summary_source, fc.skip)
        # UPDATE 2021-02-05: Count the target as an actual feature!!! It is!!!
        # if target_feature_name:
        #     self.summary_source["num_columns"] = self.summary_source["num_columns"] - 1
        if compare_df is not None:
            self.summary_compare = dict()
            self.summarize_dataframe(compare_df, self.compare_name, self.summary_compare, fc.skip)
            cmp_not_in_src = \
                [name for name in all_compare_names if name not in all_source_names]
            self.summary_compare["num_cmp_not_in_source"] = len(cmp_not_in_src)
            # UPDATE 2021-02-05: Count the target as an actual feature!!! It is!!!
            # if target_feature_name:
            #     if target_feature_name in compare_df.columns:
            #         self.summary_compare["num_columns"] = self.summary_compare["num_columns"] - 1
        else:
            self.summary_compare = None
        self.progress_bar.update(ratio_progress_of_df_summary_vs_feature)
        self.num_summaries = number_features
        # Association check
        if pairwise_analysis == 'auto' and \
                number_features > config["Processing"].getint("association_auto_threshold"):
            print(f"PAIRWISE CALCULATION LENGTH WARNING: There are {number_features} features in "
                  f"this dataframe and the "
                  f"'pairwise_analysis' parameter is set to 'auto'.\nPairwise analysis is exponential in "
                  f"length: {number_features} features will cause ~"
                  f"{number_features * number_features} pairs to be "
                  f"evaluated, which could take a long time.\n\nYou must call the function with the "
                  f"parameter pairwise_analysis='on' or 'off' to explicitly select desired behavior."
                  )
            self.progress_bar.close()
            return
        # Validate and process TARGET
        target_to_process = None
        target_type = None
        if target_feature_name:
            # Make sure target exists
            self.progress_bar.set_description_str(f"Feature: {target_feature_name} (TARGET)")
            targets_found = [item for item in filtered_series_names_in_source
                             if item == target_feature_name]
            if len(targets_found) == 0:
                self.progress_bar.close()
                raise KeyError(f"Feature '{target_feature_name}' was "
                               f"specified as TARGET, but is NOT FOUND in "
                               f"the dataframe (watch case-sensitivity?).")
            # Make sure target has no nan's
            if source_df[targets_found[0]].isnull().values.any():
                self.progress_bar.close()
                raise ValueError(f"\nTarget feature '{targets_found[0]}' contains NaN (missing) values.\n"
                                 f"To avoid confusion in interpreting target distribution,\n"
                                 f"target features MUST NOT have any missing values at this time.\n")
            # Find Target in compared, if present
            compare_target_series = None
            if compare_df is not None:
                if target_feature_name in compare_df.columns:
                    if compare_df[target_feature_name].isnull().values.any():
                        self.progress_bar.close()
                        raise ValueError(
                            f"\nTarget feature '{target_feature_name}' in COMPARED data contains NaN (missing) values.\n"
                            f"To avoid confusion in interpreting target distribution,\n"
                            f"target features MUST NOT have any missing values at this time.\n")
                    compare_target_series = compare_df[target_feature_name]
            # TARGET processed HERE with COMPARE if present
            target_to_process = FeatureToProcess(-1, source_df[targets_found[0]], compare_target_series,
                                                None, None, fc.get_predetermined_type(targets_found[0]))
            self._target = sa.analyze_feature_to_dictionary(target_to_process)
            filtered_series_names_in_source.remove(targets_found[0])
            target_type = self._target["type"]
            self.progress_bar.update(1)
        # Set final target series and sanitize targets (e.g. bool->truly bool)
        source_target_series = None
        compare_target_series = None
        if target_feature_name:
            if target_feature_name not in source_df.columns:
                raise ValueError
            if self._target["type"] == sa.FeatureType.TYPE_BOOL:
                source_target_series = self.get_sanitized_bool_series(source_df[target_feature_name])
            else:
                source_target_series = source_df[target_feature_name]
            if compare_df is not None:
                if target_feature_name in compare_df.columns:
                    if self._target["type"] == sa.FeatureType.TYPE_BOOL:
                        compare_target_series = self.get_sanitized_bool_series(compare_df[
                                                                                   target_feature_name])
                    else:
                        compare_target_series = compare_df[target_feature_name]
        # Create list of features to process
        features_to_process = []
        for cur_series_name, cur_order_index in zip(filtered_series_names_in_source,
                                                    range(0, len(filtered_series_names_in_source))):
            # TODO: BETTER HANDLING OF DIFFERENT COLUMNS IN SOURCE/COMPARE
            if compare_df is not None and cur_series_name in \
                    compare_df.columns:
                this_feat = FeatureToProcess(cur_order_index,
                                             source_df[cur_series_name],
                                             compare_df[cur_series_name],
                                             source_target_series,
                                             compare_target_series,
                                             fc.get_predetermined_type(cur_series_name),
                                             target_type)
            else:
                this_feat = FeatureToProcess(cur_order_index,
                                             source_df[cur_series_name],
                                             None,
                                             source_target_series,
                                             None,
                                             fc.get_predetermined_type(cur_series_name),
                                             target_type)
            features_to_process.append(this_feat)
        # Process columns -> features
        self.run_id = hex(int(time.time()))[2:] + "_" # removes the decimals
        # self.temp_folder = config["Files"].get("temp_folder")
        # os.makedirs(os.path.normpath(self.temp_folder), exist_ok=True)
        for f in features_to_process:
            # start = time.perf_counter()
            self.progress_bar.set_description_str(f"Feature: {f.source.name}")
            self._features[f.source.name] = sa.analyze_feature_to_dictionary(f)
            self.progress_bar.update(1)
            # print(f"DONE FEATURE------> {f.source.name}"
            #       f" {(time.perf_counter() - start):.2f}   {self._features[f.source.name]['type']}")
        # self.progress_bar.set_description_str('[FEATURES DONE]')
        # self.progress_bar.close()
        # Wrap up summary
        self.summarize_category_types(source_df, self.summary_source, fc.skip, self._target)
        if compare is not None:
            self.summarize_category_types(compare_df, self.summary_compare, fc.skip, self._target)
        self.dataframe_summary_html = sv_html.generate_html_dataframe_summary(self)
        self.graph_legend = GraphLegend(self)
        # Process all associations
        # ----------------------------------------------------
        # Put target first
        if target_to_process is not None:
            features_to_process.insert(0,target_to_process)
        if pairwise_analysis.lower() != 'off':
            self.progress_bar.reset(total=len(features_to_process))
            self.progress_bar.set_description_str("[Step 2/3] Processing Pairwise Features")
            self.process_associations(features_to_process, source_target_series, compare_target_series)
            self.progress_bar.reset(total=1)
            self.progress_bar.set_description_str("[Step 3/3] Generating associations graph")
            self.associations_html_source = True # Generated later in the process
            self.associations_html_compare = True # Generated later in the process
            self._association_graphs["all"] = GraphAssoc(self, "all", self._associations)
            self._association_graphs_compare["all"] = GraphAssoc(self, "all", self._associations_compare)
            self.progress_bar.set_description_str("Done! Use 'show' commands to display/save.   ")
            self.progress_bar.update(1)
        else:
            self._associations = None
            self._associations_compare = None
            self.associations_html_source = None
            self.associations_html_compare = None
        self.progress_bar.close()
        return
def verbose_print(self, *args, **kwargs):
    """Forward to print() only when verbosity is set to "full"."""
    if self.verbosity_level != "full":
        return
    print(*args, **kwargs)
def __getitem__(self, key):
    """Look up an analyzed feature by name; the target feature is addressable too.

    Returns None for unknown keys rather than raising KeyError.
    """
    if key in self._features:
        return self._features[key]
    if self._target is not None and key == self._target["name"]:
        return self._target
    return None
def __setitem__(self, key, value):
    """Store (or overwrite) the analyzed-feature dictionary under *key*."""
    self._features[key] = value
@staticmethod
def get_predetermined_type(name: str,
                           feature_predetermined_types: dict):
    """Return the forced FeatureType for *name*; currently always TYPE_UNSUPPORTED.

    NOTE(review): both branches of the original returned the same value, so
    the ``feature_predetermined_types`` lookup is effectively unimplemented —
    behavior is preserved here by returning the constant directly.
    """
    return sa.FeatureType.TYPE_UNSUPPORTED
@staticmethod
def sanitize_bool(value) -> bool:
    """Coerce *value* to a bool.

    Strings are matched (case-insensitively) against common truthy spellings;
    numbers use standard truthiness; anything else yields False.
    """
    # BUG FIX: the original tested `value is bool`, i.e. identity with the
    # *type* object, which is never true for actual booleans (they were only
    # handled by falling through to the numeric branch) and returned the
    # class itself when passed `bool`.
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.lower() in ['true', '1', 't', 'y', 'yes', '1.0']
    if isinstance(value, (int, float)):
        return bool(value)
    return False
@staticmethod
def get_sanitized_bool_series(source: pd.Series) -> pd.Series:
    """Map a series to nullable-int booleans (0/1), preserving missing values.

    The nullable 'Int64' cast is required because plain int/bool dtypes
    crash in the presence of NaN values.
    """
    as_bool = source.map(DataframeReport.sanitize_bool, na_action='ignore')
    return (as_bool * 1).astype('Int64')
def get_target_type(self) -> FeatureType:
    """Return the analyzed target's FeatureType, or None if no target was set."""
    target = self._target
    return None if target is None else target["type"]
def get_type(self, feature_name: str) -> "FeatureType":
    """Return the FeatureType of *feature_name*, checking features then the target.

    Returns None when the name is unknown.
    """
    feature = self._features.get(feature_name)
    if feature is not None:
        return feature.get("type")
    # BUG FIX: the original dereferenced self._target["name"] without a None
    # check, raising TypeError when no target feature was configured.
    if self._target is not None and self._target["name"] == feature_name:
        return self._target["type"]
    return None
def summarize_dataframe(self, source: pd.DataFrame, name: str, target_dict: dict, skip: List[str]):
    """Fill *target_dict* with summary statistics (size, memory, duplicates) for *source*."""
    row_count = len(source)
    col_count = len(source.columns)
    kept_columns = [c for c in source.columns if c not in skip]

    target_dict["name"] = name
    target_dict["num_rows"] = row_count
    target_dict["num_columns"] = col_count
    target_dict["num_skipped_columns"] = col_count - len(kept_columns)
    target_dict["memory_total"] = source.memory_usage(index=True, deep=True).sum()
    # Guard against division by zero on an empty dataframe.
    if row_count:
        target_dict["memory_single_row"] = float(target_dict["memory_total"]) / row_count
    else:
        target_dict["memory_single_row"] = 0
    target_dict["duplicates"] = NumWithPercent(sum(source.duplicated()), row_count)
    target_dict["num_cmp_not_in_source"] = 0  # set later, as needed
def summarize_category_types(self, this_df: pd.DataFrame, dest_dict: dict, skip: List[str],
                             source_target_dict):
    """Count categorical/boolean, numerical and text features present in *this_df*."""
    def _count(type_matches) -> int:
        # Only count analyzed features that are not skipped and exist in this_df.
        return len([f for f in self._features.values()
                    if type_matches(f["type"]) and f["name"] not in skip and f["name"] in this_df])

    cat_like = (FeatureType.TYPE_CAT, FeatureType.TYPE_BOOL)
    dest_dict["num_cat"] = _count(lambda t: t in cat_like)
    dest_dict["num_numerical"] = _count(lambda t: t == FeatureType.TYPE_NUM)
    dest_dict["num_text"] = _count(lambda t: t == FeatureType.TYPE_TEXT)

    # The target is stored separately from _features but counts as a feature too.
    if source_target_dict is not None and source_target_dict["name"] in this_df:
        target_type = source_target_dict["type"]
        if target_type == FeatureType.TYPE_NUM:
            dest_dict["num_numerical"] = dest_dict["num_numerical"] + 1
        elif target_type in cat_like:
            dest_dict["num_cat"] = dest_dict["num_cat"] + 1
    return
def get_what_influences_me(self, feature_name: str) -> dict:
    """Return {other_feature: association_value} for every feature whose
    association entry gives information about *feature_name*."""
    return {
        other_name: assoc[feature_name]
        for other_name, assoc in self._associations.items()
        if other_name != feature_name and assoc.get(feature_name) is not None
    }
# ----------------------------------------------------------------------------------------------
# ASSOCIATIONS
# ----------------------------------------------------------------------------------------------
def process_associations(self, features_to_process: List[FeatureToProcess], source_target_series,
                         compare_target_series):
    """Populate self._associations (and _associations_compare) for every
    ordered feature pair.

    CAT-CAT uses Theil's U (asymmetric, computed both directions);
    CAT-NUM uses the correlation ratio and NUM-NUM uses Pearson — both
    symmetric, so the value is mirrored to the other feature's entry.

    NOTE(review): the ``source_target_series``/``compare_target_series``
    parameters are not referenced in this body — confirm with callers.
    """
    def mirror_association(association_dict, feature_name, other_name, value):
        # Write `value` into the reverse direction, without overwriting an
        # existing entry.
        if other_name not in association_dict.keys():
            association_dict[other_name] = dict()
        other_dict = association_dict[other_name]
        if feature_name not in other_dict.keys():
            other_dict[feature_name] = value
    for feature in features_to_process:
        feature_name = feature.source.name
        if feature_name not in self._associations.keys():
            self._associations[feature_name] = dict()
        cur_associations = self._associations[feature_name]
        if feature.compare is not None:
            if feature_name not in self._associations_compare.keys():
                self._associations_compare[feature_name] = dict()
            cur_associations_compare = self._associations_compare[feature_name]
        else:
            cur_associations_compare = None
        for other in features_to_process:
            # for other in [of for of in features_to_process if of.source.name != feature_name]:
            # Only compute compare-side values when both features exist there.
            process_compare = cur_associations_compare is not None and other.compare is not None
            # if other.source.name in cur_associations.keys():
            #     print(f"Skipping {feature_name} {other.source.name}")
            #     continue
            if other.source.name == feature_name:
                # Self-association is defined as 0.0.
                cur_associations[other.source.name] = 0.0
                mirror_association(self._associations, feature_name, other.source.name, 0.0)
                if process_compare:
                    cur_associations_compare[other.source.name] = 0.0
                    mirror_association(self._associations_compare, feature_name, other.source.name, 0.0)
                continue
            if self[feature_name]["type"] == FeatureType.TYPE_CAT or \
                    self[feature_name]["type"] == FeatureType.TYPE_BOOL:
                # CAT/BOOL source
                # ------------------------------------
                if self[other.source.name]["type"] == FeatureType.TYPE_CAT or \
                        self[other.source.name]["type"] == FeatureType.TYPE_BOOL:
                    # CAT-CAT: Theil's U is asymmetric, so no mirroring here.
                    cur_associations[other.source.name] = \
                        associations.theils_u(feature.source, other.source)
                    if process_compare:
                        cur_associations_compare[other.source.name] = \
                            associations.theils_u(feature.compare, other.compare)
                elif self[other.source.name]["type"] == FeatureType.TYPE_NUM:
                    # CAT-NUM
                    # This handles cat-num, then mirrors so no need to process num-cat separately
                    # (symmetrical relationship)
                    cur_associations[other.source.name] = \
                        associations.correlation_ratio(feature.source, other.source)
                    mirror_association(self._associations, feature_name, other.source.name, \
                                       cur_associations[other.source.name])
                    if process_compare:
                        cur_associations_compare[other.source.name] = \
                            associations.correlation_ratio(feature.compare, other.compare)
                        mirror_association(self._associations_compare, feature_name, other.source.name, \
                                           cur_associations_compare[other.source.name])
            elif self[feature_name]["type"] == FeatureType.TYPE_NUM:
                # NUM source
                # ------------------------------------
                if self[other.source.name]["type"] == FeatureType.TYPE_NUM:
                    # NUM-NUM
                    try:
                        cur_associations[other.source.name] = \
                            feature.source.corr(other.source, method='pearson')
                    except FloatingPointError:
                        # This usually happens when there is only 1 non-NaN value in each data series
                        # Assigning the value 1.0 as per
                        # https://stats.stackexchange.com/questions/94150/why-is-the-pearson-correlation-1-when-only-two-data-values-are-available
                        # -> Also showing a warning
                        cur_associations[other.source.name] = 1.0
                        self.corr_warning.append(feature_name + "/" + other.source.name)
                    # TODO: display correlation error better in graph!
                    if isnan(cur_associations[other.source.name]):
                        if feature.source.equals(other.source):
                            cur_associations[other.source.name] = CORRELATION_IDENTICAL
                        else:
                            # ERROR may occur if Nan's in one match values in other, and vice-versa
                            cur_associations[other.source.name] = CORRELATION_ERROR
                    mirror_association(self._associations, feature_name, other.source.name, \
                                       cur_associations[other.source.name])
                    if process_compare:
                        cur_associations_compare[other.source.name] = \
                            feature.compare.corr(other.compare, method='pearson')
                        # TODO: display correlation error better in graph!
                        if isnan(cur_associations_compare[other.source.name]):
                            if feature.compare.equals(other.compare):
                                cur_associations_compare[other.source.name] = CORRELATION_IDENTICAL
                            else:
                                # ERROR may occur if Nan's in one match values in other, and vice-versa
                                cur_associations_compare[other.source.name] = CORRELATION_ERROR
                        mirror_association(self._associations_compare, feature_name, other.source.name, \
                                           cur_associations_compare[other.source.name])
        self.progress_bar.update(1)
# ----------------------------------------------------------------------------------------------
# OUTPUT
# ----------------------------------------------------------------------------------------------
def use_config_if_none(self, passed_value, config_name):
    """Return *passed_value*, falling back to the [Output_Defaults] config
    entry *config_name* when it is None."""
    if passed_value is not None:
        return passed_value
    return config["Output_Defaults"][config_name]
def generate_comet_friendly_html(self):
    """Re-render the report as comet_ml-friendly HTML into self._page_html,
    using layout/scale from the [comet_ml_defaults] config section."""
    # Enforce comet_ml-friendly layout and re-output report based on INI settings (comet_ml_Defaults)
    self.page_layout = config["comet_ml_defaults"]["html_layout"]
    self.scale = float(config["comet_ml_defaults"]["html_scale"])
    sv_html.set_summary_positions(self)
    sv_html.generate_html_detail(self)
    # Regenerate association sections only if they were enabled at build time.
    if self.associations_html_source:
        self.associations_html_source = sv_html.generate_html_associations(self, "source")
    if self.associations_html_compare:
        self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
    self._page_html = sv_html.generate_html_dataframe_page(self)
def show_html(self, filepath='SWEETVIZ_REPORT.html', open_browser=True, layout='widescreen', scale=None):
    """Render the report to an HTML file and optionally open it in a browser.

    layout must be 'widescreen' or 'vertical'; scale/layout fall back to the
    [Output_Defaults] config section when None. Also auto-logs to comet_ml
    when a comet logger is active.
    """
    scale = float(self.use_config_if_none(scale, "html_scale"))
    layout = self.use_config_if_none(layout, "html_layout")
    if layout not in ['widescreen', 'vertical']:
        raise ValueError(f"'layout' parameter must be either 'widescreen' or 'vertical'")
    sv_html.load_layout_globals_from_config()
    self.page_layout = layout
    self.scale = scale
    sv_html.set_summary_positions(self)
    sv_html.generate_html_detail(self)
    if self.associations_html_source:
        self.associations_html_source = sv_html.generate_html_associations(self, "source")
    if self.associations_html_compare:
        self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
    self._page_html = sv_html.generate_html_dataframe_page(self)
    f = open(filepath, 'w', encoding="utf-8")
    f.write(self._page_html)
    f.close()
    if open_browser:
        self.verbose_print(f"Report {filepath} was generated! NOTEBOOK/COLAB USERS: the web browser MAY not pop up, regardless, the report IS saved in your notebook/colab files.")
        # Not sure how to work around this: not fatal but annoying...Notebook/colab
        # https://bugs.python.org/issue5993
        webbrowser.open('file://' + os.path.realpath(filepath))
    else:
        self.verbose_print(f"Report {filepath} was generated.")
    # Surface any correlation edge cases recorded during association processing.
    if len(self.corr_warning):
        print("---\nWARNING: one or more correlations had an edge-case/error and a 1.0 correlation was assigned\n"
              "(likely due to only having a single row, containing non-NaN values for both correlated features)\n"
              "Affected correlations:" + str(self.corr_warning))
    # Auto-log to comet_ml if desired & present
    self._comet_ml_logger = comet_ml_logger.CometLogger()
    if self._comet_ml_logger._logging:
        self.generate_comet_friendly_html()
        self._comet_ml_logger.log_html(self._page_html)
        self._comet_ml_logger.end()
def show_notebook(self, w=None, h=None, scale=None, layout=None, filepath=None, file_layout=None, file_scale=None):
    """Render the report inline in a Jupyter/Colab notebook (as an iframe),
    optionally also saving a standalone HTML file to *filepath*.

    Notebook display uses the notebook_* config defaults; the optional file
    output uses separate file_layout/file_scale (html_* config defaults).
    """
    w = self.use_config_if_none(w, "notebook_width")
    h = self.use_config_if_none(h, "notebook_height")
    scale = float(self.use_config_if_none(scale, "notebook_scale"))
    layout = self.use_config_if_none(layout, "notebook_layout")
    if layout not in ['widescreen', 'vertical']:
        raise ValueError(f"'layout' parameter must be either 'widescreen' or 'vertical'")
    sv_html.load_layout_globals_from_config()
    self.page_layout = layout
    self.scale = scale
    sv_html.set_summary_positions(self)
    sv_html.generate_html_detail(self)
    if self.associations_html_source:
        self.associations_html_source = sv_html.generate_html_associations(self, "source")
    if self.associations_html_compare:
        self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
    self._page_html = sv_html.generate_html_dataframe_page(self)
    width=w
    height=h
    # "full" height means size the iframe to the whole report page.
    if str(height).lower() == "full":
        height = self.page_height
    # Output to iFrame
    import html
    # Escape so the whole page can be embedded via the srcdoc attribute.
    self._page_html = html.escape(self._page_html)
    iframe = f'  <iframe width="{width}" height="{height}" srcdoc="{self._page_html}" frameborder="0" allowfullscreen></iframe>'
    from IPython.display import display
    from IPython.display import HTML
    display(HTML(iframe))
    if filepath is not None:
        # We cannot just write out the same HTML as the notebook, as that one has been processed so as to
        # remove extraneous headings so it is nicely inserted into the notebook.
        # Instead, just do something similar to the "show_html()" code, but without its less-relevant printouts etc.
        # f = open(filepath, 'w', encoding="utf-8")
        # f.write(self._page_html)
        # f.close()
        scale = float(self.use_config_if_none(file_scale, "html_scale"))
        layout = self.use_config_if_none(file_layout, "html_layout")
        if layout not in ['widescreen', 'vertical']:
            raise ValueError(f"'layout' parameter for file output must be either 'widescreen' or 'vertical'")
        sv_html.load_layout_globals_from_config()
        self.page_layout = layout
        self.scale = scale
        sv_html.set_summary_positions(self)
        sv_html.generate_html_detail(self)
        if self.associations_html_source:
            self.associations_html_source = sv_html.generate_html_associations(self, "source")
        if self.associations_html_compare:
            self.associations_html_compare = sv_html.generate_html_associations(self, "compare")
        self._page_html = sv_html.generate_html_dataframe_page(self)
        f = open(filepath, 'w', encoding="utf-8")
        f.write(self._page_html)
        f.close()
        self.verbose_print(f"Report '{filepath}' was saved to storage.")
    # Surface any correlation edge cases recorded during association processing.
    if len(self.corr_warning):
        print("WARNING: one or more correlations had an edge-case/error and a 1.0 correlation was assigned\n"
              "(likely due to only a single row containing non-NaN values for both correlated features)\n"
              "Affected correlations:" + str(self.corr_warning))
    # Auto-log to comet_ml if desired & present
    self._comet_ml_logger = comet_ml_logger.CometLogger()
    if self._comet_ml_logger._logging:
        self.generate_comet_friendly_html()
        self._comet_ml_logger.log_html(self._page_html)
        self._comet_ml_logger.end()
def log_comet(self, experiment: 'comet_ml_logger.Experiment'):
    """Regenerate the report with the comet_ml-friendly layout and log its HTML to *experiment*.

    Best-effort: a failure while logging is reported on stdout instead of
    raised, so a broken comet_ml connection never aborts the caller.
    """
    self.generate_comet_friendly_html()
    try:
        experiment.log_html(self._page_html)
    except Exception as e:
        # Was a bare "except:": narrowed so KeyboardInterrupt/SystemExit still
        # propagate, and the actual cause is surfaced for debugging.
        print(f"log_comet(): error logging HTML report. ({e})")
|
(source: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str]], target_feature_name: str = None, compare: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str]] = None, pairwise_analysis: str = 'auto', fc: sweetviz.feature_config.FeatureConfig = None, verbosity: str = 'default')
|
708,812
|
sweetviz.dataframe_report
|
__getitem__
| null |
def __getitem__(self, key):
    """Fetch the analysis dictionary for feature *key*.

    The target feature is reachable through its name as well; returns
    None when *key* matches neither a feature nor the target.
    """
    if key in self._features:
        return self._features[key]
    target = self._target
    if target is not None and key == target["name"]:
        return target
    return None
|
(self, key)
|
708,813
|
sweetviz.dataframe_report
|
__init__
| null |
def __init__(self,
             source: Union[pd.DataFrame, Tuple[pd.DataFrame, str]],
             target_feature_name: str = None,
             compare: Union[pd.DataFrame, Tuple[pd.DataFrame, str]] = None,
             pairwise_analysis: str = 'auto',
             fc: FeatureConfig = None,
             verbosity: str = 'default'):  # verbosity: default (full), full, progress_only, off
    """Analyze *source* (optionally against *compare*) into a full report.

    :param source: a DataFrame, or a (DataFrame, "display name") pair.
    :param target_feature_name: column treated as the TARGET; must exist in
        ``source`` and must not contain NaN values.
    :param compare: optional comparison data, same accepted forms as ``source``.
    :param pairwise_analysis: 'on', 'off' or 'auto'. 'auto' aborts early (with
        a printed warning) when the feature count exceeds the configured
        ``association_auto_threshold``, since pairwise work grows quadratically.
    :param fc: optional FeatureConfig listing skipped / force-typed features.
    :param verbosity: 'default' (resolved from the INI config), 'full',
        'progress_only' or 'off'.
    :raises ValueError: for invalid parameter values, duplicate column names,
        NaN-containing targets, or mis-specified feature_config entries.
    :raises KeyError: when the named target is not found in the dataframe.
    """
    # Parse analysis parameter
    pairwise_analysis = pairwise_analysis.lower()
    if pairwise_analysis not in ["on", "auto", "off"]:
        raise ValueError('"pairwise_analysis" parameter should be one of: "on", "auto", "off"')
    # Parse verbosity parameter
    if verbosity == "default":
        # Resolve the concrete level from the INI config, then re-validate it.
        verbosity = config["General"]["default_verbosity"]
    if verbosity not in ["default", "full", "progress_only", "off"]:
        raise ValueError('"verbosity" parameter should be one of: "default", "full", "progress_only", "off"')
    self.verbosity_level = verbosity
    sv_html.load_layout_globals_from_config()
    self._jupyter_html = ""
    self._page_html = ""
    self._features = dict()  # feature name -> analysis dictionary
    self.compare_name = None
    self._target = None
    self.test_mode = False
    self.corr_warning = list()  # edge-case correlations reported after show_*()
    if fc is None:
        fc = FeatureConfig()
    # Associations: _associations[FEATURE][GIVES INFORMATION ABOUT THIS FEATURE]
    self._associations = dict()
    self._associations_compare = dict()
    self._association_graphs = dict()
    self._association_graphs_compare = dict()
    # Handle source and compare dataframes and names
    if type(source) == pd.DataFrame:
        source_df = source
        self.source_name = "DataFrame"
    elif type(source) == list or type(source) == tuple:
        if len(source) != 2:
            raise ValueError('"source" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
        source_df = source[0]
        self.source_name = source[1]
    else:
        raise ValueError('"source" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
    if len(su.get_duplicate_cols(source_df)) > 0:
        raise ValueError('Duplicate column names detected in "source"; this is not supported.')
    # NEW (12-14-2020): Rename indices that use the reserved name "index"
    # From pandas-profiling:
    # If the DataFrame contains a column or index named `index`, this will produce errors. We rename the {index,column} to be `df_index`.
    if 'index' in source_df.columns:
        source_df = source_df.rename(columns={"index": "df_index"})
        if target_feature_name == 'index':
            target_feature_name = 'df_index'
    all_source_names = [cur_name for cur_name, cur_series in source_df.items()]
    if compare is None:
        compare_df = None
        self.compare_name = None
        all_compare_names = list()
    elif type(compare) == pd.DataFrame:
        compare_df = compare
        if 'index' in compare_df.columns:
            compare_df = compare_df.rename(columns={"index": "df_index"})
        self.compare_name = "Compared"
        all_compare_names = [cur_name for cur_name, cur_series in compare_df.items()]
    elif type(compare) == list or type(compare) == tuple:
        if len(compare) != 2:
            raise ValueError('"compare" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
        compare_df = compare[0]
        if 'index' in compare_df.columns:
            compare_df = compare_df.rename(columns={"index": "df_index"})
        self.compare_name = compare[1]
        all_compare_names = [cur_name for cur_name, cur_series in compare_df.items()]
    else:
        raise ValueError('"compare" parameter should either be a string or a list of 2 elements: [dataframe, "Name"].')
    # Validate some params
    if compare_df is not None and len(su.get_duplicate_cols(compare_df)) > 0:
        raise ValueError('Duplicate column names detected in "compare"; this is not supported.')
    if target_feature_name in fc.skip:
        raise ValueError(f'"{target_feature_name}" was also specified as "skip". Target cannot be skipped.')
    for key in fc.get_all_mentioned_features():
        if key not in all_source_names:
            raise ValueError(f'"{key}" was specified in "feature_config" but is not found in source dataframe (watch case-sensitivity?).')
    # Find Features and Target (FILTER SKIPPED)
    filtered_series_names_in_source = [cur_name for cur_name, cur_series in source_df.items()
                                       if cur_name not in fc.skip]
    for skipped in fc.skip:
        if skipped not in all_source_names and skipped not in all_compare_names:
            raise ValueError(f'"{skipped}" was marked as "skip" but is not in any provided dataframe (watch case-sensitivity?).')
    # Progress bar setup
    ratio_progress_of_df_summary_vs_feature = 1.0
    number_features = len(filtered_series_names_in_source)
    # NOTE(review): computed but never used below — looks like a leftover from
    # the pairwise-length warning further down; confirm before removing.
    exponential_checks = number_features * number_features
    # NOTE(review): the trailing conditional term evaluates to 0 on both
    # branches; the target's progress tick is issued separately below.
    progress_chunks = ratio_progress_of_df_summary_vs_feature \
                      + number_features + (0 if target_feature_name is not None else 0)
    class DummyFile(object):
        # Sink used to silence tqdm output when verbosity is 'off'/'default'.
        def write(self, x):
            pass  # Do nothing
        def flush(self):
            pass  # Do nothing
    if self.verbosity_level in ('full', 'progress_only'):
        self.progress_bar = tqdm(total=progress_chunks, bar_format= \
            '{desc:45}|{bar}| [{percentage:3.0f}%] {elapsed} -> ({remaining} left)', \
            ascii=False, dynamic_ncols=True, position=0, leave= True)
    else:
        # No progress bar, use dummy file
        self.progress_bar = tqdm(total=progress_chunks, bar_format= \
            '{desc:45}|{bar}| [{percentage:3.0f}%] {elapsed} -> ({remaining} left)', \
            ascii=False, dynamic_ncols=True, position=0, leave= True, file=DummyFile())
    # Summarize dataframe
    self.progress_bar.set_description_str("[Summarizing dataframe]")
    self.summary_source = dict()
    self.summarize_dataframe(source_df, self.source_name, self.summary_source, fc.skip)
    # UPDATE 2021-02-05: Count the target as an actual feature!!! It is!!!
    # if target_feature_name:
    #     self.summary_source["num_columns"] = self.summary_source["num_columns"] - 1
    if compare_df is not None:
        self.summary_compare = dict()
        self.summarize_dataframe(compare_df, self.compare_name, self.summary_compare, fc.skip)
        cmp_not_in_src = \
            [name for name in all_compare_names if name not in all_source_names]
        self.summary_compare["num_cmp_not_in_source"] = len(cmp_not_in_src)
        # UPDATE 2021-02-05: Count the target has an actual feature!!! It is!!!
        # if target_feature_name:
        #     if target_feature_name in compare_df.columns:
        #         self.summary_compare["num_columns"] = self.summary_compare["num_columns"] - 1
    else:
        self.summary_compare = None
    self.progress_bar.update(ratio_progress_of_df_summary_vs_feature)
    self.num_summaries = number_features
    # Association check
    # In 'auto' mode, refuse to silently start a quadratic amount of work:
    # warn and return a partially-built report instead.
    if pairwise_analysis == 'auto' and \
            number_features > config["Processing"].getint("association_auto_threshold"):
        print(f"PAIRWISE CALCULATION LENGTH WARNING: There are {number_features} features in "
              f"this dataframe and the "
              f"'pairwise_analysis' parameter is set to 'auto'.\nPairwise analysis is exponential in "
              f"length: {number_features} features will cause ~"
              f"{number_features * number_features} pairs to be "
              f"evaluated, which could take a long time.\n\nYou must call the function with the "
              f"parameter pairwise_analysis='on' or 'off' to explicitly select desired behavior."
              )
        self.progress_bar.close()
        return
    # Validate and process TARGET
    target_to_process = None
    target_type = None
    if target_feature_name:
        # Make sure target exists
        self.progress_bar.set_description_str(f"Feature: {target_feature_name} (TARGET)")
        targets_found = [item for item in filtered_series_names_in_source
                         if item == target_feature_name]
        if len(targets_found) == 0:
            self.progress_bar.close()
            raise KeyError(f"Feature '{target_feature_name}' was "
                           f"specified as TARGET, but is NOT FOUND in "
                           f"the dataframe (watch case-sensitivity?).")
        # Make sure target has no nan's
        if source_df[targets_found[0]].isnull().values.any():
            self.progress_bar.close()
            raise ValueError(f"\nTarget feature '{targets_found[0]}' contains NaN (missing) values.\n"
                             f"To avoid confusion in interpreting target distribution,\n"
                             f"target features MUST NOT have any missing values at this time.\n")
        # Find Target in compared, if present
        compare_target_series = None
        if compare_df is not None:
            if target_feature_name in compare_df.columns:
                if compare_df[target_feature_name].isnull().values.any():
                    self.progress_bar.close()
                    raise ValueError(
                        f"\nTarget feature '{target_feature_name}' in COMPARED data contains NaN (missing) values.\n"
                        f"To avoid confusion in interpreting target distribution,\n"
                        f"target features MUST NOT have any missing values at this time.\n")
                compare_target_series = compare_df[target_feature_name]
        # TARGET processed HERE with COMPARE if present
        target_to_process = FeatureToProcess(-1, source_df[targets_found[0]], compare_target_series,
                                             None, None, fc.get_predetermined_type(targets_found[0]))
        self._target = sa.analyze_feature_to_dictionary(target_to_process)
        filtered_series_names_in_source.remove(targets_found[0])
        target_type = self._target["type"]
        self.progress_bar.update(1)
    # Set final target series and sanitize targets (e.g. bool->truly bool)
    source_target_series = None
    compare_target_series = None
    if target_feature_name:
        if target_feature_name not in source_df.columns:
            # NOTE(review): raised without a message; appears unreachable in
            # practice since the target was already located above — confirm.
            raise ValueError
        if self._target["type"] == sa.FeatureType.TYPE_BOOL:
            source_target_series = self.get_sanitized_bool_series(source_df[target_feature_name])
        else:
            source_target_series = source_df[target_feature_name]
        if compare_df is not None:
            if target_feature_name in compare_df.columns:
                if self._target["type"] == sa.FeatureType.TYPE_BOOL:
                    compare_target_series = self.get_sanitized_bool_series(compare_df[
                        target_feature_name])
                else:
                    compare_target_series = compare_df[target_feature_name]
    # Create list of features to process
    features_to_process = []
    for cur_series_name, cur_order_index in zip(filtered_series_names_in_source,
                                                range(0, len(filtered_series_names_in_source))):
        # TODO: BETTER HANDLING OF DIFFERENT COLUMNS IN SOURCE/COMPARE
        if compare_df is not None and cur_series_name in \
                compare_df.columns:
            this_feat = FeatureToProcess(cur_order_index,
                                         source_df[cur_series_name],
                                         compare_df[cur_series_name],
                                         source_target_series,
                                         compare_target_series,
                                         fc.get_predetermined_type(cur_series_name),
                                         target_type)
        else:
            this_feat = FeatureToProcess(cur_order_index,
                                         source_df[cur_series_name],
                                         None,
                                         source_target_series,
                                         None,
                                         fc.get_predetermined_type(cur_series_name),
                                         target_type)
        features_to_process.append(this_feat)
    # Process columns -> features
    self.run_id = hex(int(time.time()))[2:] + "_"  # removes the decimals
    # self.temp_folder = config["Files"].get("temp_folder")
    # os.makedirs(os.path.normpath(self.temp_folder), exist_ok=True)
    for f in features_to_process:
        # start = time.perf_counter()
        self.progress_bar.set_description_str(f"Feature: {f.source.name}")
        self._features[f.source.name] = sa.analyze_feature_to_dictionary(f)
        self.progress_bar.update(1)
        # print(f"DONE FEATURE------> {f.source.name}"
        #       f" {(time.perf_counter() - start):.2f} {self._features[f.source.name]['type']}")
    # self.progress_bar.set_description_str('[FEATURES DONE]')
    # self.progress_bar.close()
    # Wrap up summary
    self.summarize_category_types(source_df, self.summary_source, fc.skip, self._target)
    if compare is not None:
        self.summarize_category_types(compare_df, self.summary_compare, fc.skip, self._target)
    self.dataframe_summary_html = sv_html.generate_html_dataframe_summary(self)
    self.graph_legend = GraphLegend(self)
    # Process all associations
    # ----------------------------------------------------
    # Put target first
    if target_to_process is not None:
        features_to_process.insert(0,target_to_process)
    if pairwise_analysis.lower() != 'off':
        self.progress_bar.reset(total=len(features_to_process))
        self.progress_bar.set_description_str("[Step 2/3] Processing Pairwise Features")
        self.process_associations(features_to_process, source_target_series, compare_target_series)
        self.progress_bar.reset(total=1)
        self.progress_bar.set_description_str("[Step 3/3] Generating associations graph")
        self.associations_html_source = True  # Generated later in the process
        self.associations_html_compare = True  # Generated later in the process
        self._association_graphs["all"] = GraphAssoc(self, "all", self._associations)
        self._association_graphs_compare["all"] = GraphAssoc(self, "all", self._associations_compare)
        self.progress_bar.set_description_str("Done! Use 'show' commands to display/save.   ")
        self.progress_bar.update(1)
    else:
        self._associations = None
        self._associations_compare = None
        self.associations_html_source = None
        self.associations_html_compare = None
    self.progress_bar.close()
    return
|
(self, source: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str]], target_feature_name: Optional[str] = None, compare: Union[pandas.core.frame.DataFrame, Tuple[pandas.core.frame.DataFrame, str], NoneType] = None, pairwise_analysis: str = 'auto', fc: Optional[sweetviz.feature_config.FeatureConfig] = None, verbosity: str = 'default')
|
708,814
|
sweetviz.dataframe_report
|
__setitem__
| null |
def __setitem__(self, key, value):
    """Store (or replace) the analysis dictionary for feature *key*."""
    self._features.update({key: value})
|
(self, key, value)
|
708,815
|
sweetviz.dataframe_report
|
generate_comet_friendly_html
| null |
def generate_comet_friendly_html(self):
    """Re-render the report's HTML using the comet_ml layout/scale from the INI config."""
    defaults = config["comet_ml_defaults"]
    self.page_layout = defaults["html_layout"]
    self.scale = float(defaults["html_scale"])
    # Recompute layout positions, then regenerate every HTML fragment in order.
    sv_html.set_summary_positions(self)
    sv_html.generate_html_detail(self)
    for which in ("source", "compare"):
        attr = "associations_html_" + which
        if getattr(self, attr):
            setattr(self, attr, sv_html.generate_html_associations(self, which))
    self._page_html = sv_html.generate_html_dataframe_page(self)
|
(self)
|
708,816
|
sweetviz.dataframe_report
|
get_predetermined_type
| null |
@staticmethod
def get_predetermined_type(name: str,
                           feature_predetermined_types: dict):
    """Return the forced FeatureType for *name*, if one was predetermined.

    Currently a stub: the lookup is not implemented, so TYPE_UNSUPPORTED is
    returned for every input. (The original ``is None`` branch was dead code —
    both paths returned the same value — and has been removed.)

    :param name: feature/column name to look up (currently ignored).
    :param feature_predetermined_types: mapping of predetermined types
        (currently ignored; kept for interface compatibility).
    """
    return sa.FeatureType.TYPE_UNSUPPORTED
|
(name: str, feature_predetermined_types: dict)
|
708,817
|
sweetviz.dataframe_report
|
get_sanitized_bool_series
| null |
@staticmethod
def get_sanitized_bool_series(source: pd.Series) -> pd.Series:
    """Convert a boolean-like series to pandas' nullable 0/1 integers.

    NaN entries are skipped by the mapping (``na_action='ignore'``); the
    cast to the nullable 'Int64' dtype is what prevents NaN-related crashes.
    """
    as_bool = source.map(DataframeReport.sanitize_bool, na_action='ignore')
    return (as_bool * 1).astype('Int64')
|
(source: pandas.core.series.Series) -> pandas.core.series.Series
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.