instance_id stringlengths 13 45 | pull_number int64 7 30.1k | repo stringclasses 83
values | version stringclasses 68
values | base_commit stringlengths 40 40 | created_at stringdate 2013-05-16 18:15:55 2025-01-08 15:12:50 | patch stringlengths 347 35.2k | test_patch stringlengths 432 113k | non_py_patch stringlengths 0 18.3k | new_components listlengths 0 40 | FAIL_TO_PASS listlengths 1 2.53k | PASS_TO_PASS listlengths 0 1.7k | problem_statement stringlengths 607 52.7k | hints_text stringlengths 0 57.4k | environment_setup_commit stringclasses 167
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
sympy__sympy-23329 | 23,329 | sympy/sympy | 1.11 | 19ed9ef7c0eb8a6e135ef66289c0cb896d800dee | 2022-04-07T18:04:34Z | diff --git a/.mailmap b/.mailmap
index 1b2177727607..c684fe441b9a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -831,7 +831,7 @@ Ondřej Čertík <ondrej@certik.cz> <ondrej.certik@gmail.com>
Ondřej Čertík <ondrej@certik.cz> <ondrej@certik.us>
Ondřej Čertík <ondrej@certik.cz> ondrej.certik <devnull@localhost>
Or Dvory <gidesa@gmail.com>
-OrestisVaggelis <orestis22000@gmail.com>
+Orestis Vaggelis <orestisvaggelis@mail.com> OrestisVaggelis <orestis22000@gmail.com>
Oscar Benjamin <oscar.j.benjamin@gmail.com> <enojb@it051545.wks.bris.ac.uk>
Oscar Benjamin <oscar.j.benjamin@gmail.com> <oscar@kar-wench.(none)>
Oscar Gerardo Lazo Arjona <oscar.lazoarjona@physics.ox.ac.uk> oscarlazoarjona <oscar.lazoarjona@physics.ox.ac.uk>
diff --git a/AUTHORS b/AUTHORS
index 3f356c42e94a..eb7b08c3e7b9 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -821,7 +821,6 @@ Petr Kungurtsev <corwinat@gmail.com>
Anway De <anway1756@gmail.com>
znxftw <vishnu2101@gmail.com>
Denis Ivanenko <ivanenko@ucu.edu.ua>
-OrestisVaggelis <orestis22000@gmail.com>
Nikhil Maan <nikhilmaan22@gmail.com>
Abhinav Anand <abhinav.anand2807@gmail.com>
Qingsha Shi <googol.sqs@gmail.com>
diff --git a/doc/src/modules/combinatorics/group_numbers.rst b/doc/src/modules/combinatorics/group_numbers.rst
new file mode 100644
index 000000000000..fa742367bfb4
--- /dev/null
+++ b/doc/src/modules/combinatorics/group_numbers.rst
@@ -0,0 +1,12 @@
+.. _combinatorics-group_numbers:
+
+Nilpotent, Abelian and Cyclic Numbers
+=====================================
+
+.. module:: sympy.combinatorics.group_numbers
+
+.. autofunction:: is_nilpotent_number
+
+.. autofunction:: is_abelian_number
+
+.. autofunction:: is_cyclic_number
\ No newline at end of file
diff --git a/doc/src/modules/combinatorics/index.rst b/doc/src/modules/combinatorics/index.rst
index 61dcea2067a8..a547c3a31e3d 100644
--- a/doc/src/modules/combinatorics/index.rst
+++ b/doc/src/modules/combinatorics/index.rst
@@ -18,6 +18,7 @@ Contents
subsets.rst
graycode.rst
named_groups.rst
+ group_numbers.rst
util.rst
group_constructs.rst
testutil.rst
diff --git a/sympy/combinatorics/group_numbers.py b/sympy/combinatorics/group_numbers.py
new file mode 100644
index 000000000000..4433d76d2963
--- /dev/null
+++ b/sympy/combinatorics/group_numbers.py
@@ -0,0 +1,118 @@
+from sympy.core import Integer, Pow, Mod
+from sympy import factorint
+
+
+def is_nilpotent_number(n):
+ """
+ Check whether `n` is a nilpotent number. A number `n` is said to be
+ nilpotent if and only if every finite group of order `n` is nilpotent.
+ For more information see [1]_.
+
+ Examples
+ ========
+
+ >>> from sympy.combinatorics.group_numbers import is_nilpotent_number
+ >>> from sympy import randprime
+ >>> is_nilpotent_number(21)
+ False
+ >>> is_nilpotent_number(randprime(1, 30)**12)
+ True
+
+ References
+ ==========
+
+ .. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*,
+ The American Mathematical Monthly, 107(7), 631-634.
+
+
+ """
+ if n <= 0 or int(n) != n:
+ raise ValueError("n must be a positive integer, not %i" % n)
+
+ n = Integer(n)
+ prime_factors = list(factorint(n).items())
+ is_nilpotent = True
+ for p_j, a_j in prime_factors:
+ for p_i, a_i in prime_factors:
+ if any([Mod(Pow(p_i, k), p_j) == 1 for k in range(1, a_i + 1)]):
+ is_nilpotent = False
+ break
+ if not is_nilpotent:
+ break
+
+ return is_nilpotent
+
+
+def is_abelian_number(n):
+ """
+ Check whether `n` is an abelian number. A number `n` is said to be abelian
+ if and only if every finite group of order `n` is abelian. For more
+ information see [1]_.
+
+ Examples
+ ========
+
+ >>> from sympy.combinatorics.group_numbers import is_abelian_number
+ >>> from sympy import randprime
+ >>> is_abelian_number(4)
+ True
+ >>> is_abelian_number(randprime(1, 2000)**2)
+ True
+ >>> is_abelian_number(60)
+ False
+
+ References
+ ==========
+
+ .. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*,
+ The American Mathematical Monthly, 107(7), 631-634.
+
+
+ """
+ if n <= 0 or int(n) != n:
+ raise ValueError("n must be a positive integer, not %i" % n)
+
+ n = Integer(n)
+ if not is_nilpotent_number(n):
+ return False
+
+ prime_factors = list(factorint(n).items())
+ is_abelian = all(a_i < 3 for p_i, a_i in prime_factors)
+ return is_abelian
+
+
+def is_cyclic_number(n):
+ """
+ Check whether `n` is a cyclic number. A number `n` is said to be cyclic
+ if and only if every finite group of order `n` is cyclic. For more
+ information see [1]_.
+
+ Examples
+ ========
+
+ >>> from sympy.combinatorics.group_numbers import is_cyclic_number
+ >>> from sympy import randprime
+ >>> is_cyclic_number(15)
+ True
+ >>> is_cyclic_number(randprime(1, 2000)**2)
+ False
+ >>> is_cyclic_number(4)
+ False
+
+ References
+ ==========
+
+ .. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*,
+ The American Mathematical Monthly, 107(7), 631-634.
+
+ """
+ if n <= 0 or int(n) != n:
+ raise ValueError("n must be a positive integer, not %i" % n)
+
+ n = Integer(n)
+ if not is_nilpotent_number(n):
+ return False
+
+ prime_factors = list(factorint(n).items())
+ is_cyclic = all(a_i < 2 for p_i, a_i in prime_factors)
+ return is_cyclic
| diff --git a/sympy/combinatorics/tests/test_group_numbers.py b/sympy/combinatorics/tests/test_group_numbers.py
new file mode 100644
index 000000000000..62de63cbcaad
--- /dev/null
+++ b/sympy/combinatorics/tests/test_group_numbers.py
@@ -0,0 +1,27 @@
+from sympy.combinatorics.group_numbers import (is_nilpotent_number,
+ is_abelian_number, is_cyclic_number)
+from sympy.testing.pytest import raises
+from sympy import randprime
+
+
+def test_is_nilpotent_number():
+ assert is_nilpotent_number(21) == False
+ assert is_nilpotent_number(randprime(1, 30)**12) == True
+ raises(ValueError, lambda: is_nilpotent_number(-5))
+
+
+def test_is_abelian_number():
+ assert is_abelian_number(4) == True
+ assert is_abelian_number(randprime(1, 2000)**2) == True
+ assert is_abelian_number(randprime(1000, 100000)) == True
+ assert is_abelian_number(60) == False
+ assert is_abelian_number(24) == False
+ raises(ValueError, lambda: is_abelian_number(-5))
+
+
+def test_is_cyclic_number():
+ assert is_cyclic_number(15) == True
+ assert is_cyclic_number(randprime(1, 2000)**2) == False
+ assert is_cyclic_number(randprime(1000, 100000)) == True
+ assert is_cyclic_number(4) == False
+ raises(ValueError, lambda: is_cyclic_number(-5))
| diff --git a/.mailmap b/.mailmap
index 1b2177727607..c684fe441b9a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -831,7 +831,7 @@ Ondřej Čertík <ondrej@certik.cz> <ondrej.certik@gmail.com>
Ondřej Čertík <ondrej@certik.cz> <ondrej@certik.us>
Ondřej Čertík <ondrej@certik.cz> ondrej.certik <devnull@localhost>
Or Dvory <gidesa@gmail.com>
-OrestisVaggelis <orestis22000@gmail.com>
+Orestis Vaggelis <orestisvaggelis@mail.com> OrestisVaggelis <orestis22000@gmail.com>
Oscar Benjamin <oscar.j.benjamin@gmail.com> <enojb@it051545.wks.bris.ac.uk>
Oscar Benjamin <oscar.j.benjamin@gmail.com> <oscar@kar-wench.(none)>
Oscar Gerardo Lazo Arjona <oscar.lazoarjona@physics.ox.ac.uk> oscarlazoarjona <oscar.lazoarjona@physics.ox.ac.uk>
diff --git a/AUTHORS b/AUTHORS
index 3f356c42e94a..eb7b08c3e7b9 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -821,7 +821,6 @@ Petr Kungurtsev <corwinat@gmail.com>
Anway De <anway1756@gmail.com>
znxftw <vishnu2101@gmail.com>
Denis Ivanenko <ivanenko@ucu.edu.ua>
-OrestisVaggelis <orestis22000@gmail.com>
Nikhil Maan <nikhilmaan22@gmail.com>
Abhinav Anand <abhinav.anand2807@gmail.com>
Qingsha Shi <googol.sqs@gmail.com>
diff --git a/doc/src/modules/combinatorics/group_numbers.rst b/doc/src/modules/combinatorics/group_numbers.rst
new file mode 100644
index 000000000000..fa742367bfb4
--- /dev/null
+++ b/doc/src/modules/combinatorics/group_numbers.rst
@@ -0,0 +1,12 @@
+.. _combinatorics-group_numbers:
+
+Nilpotent, Abelian and Cyclic Numbers
+=====================================
+
+.. module:: sympy.combinatorics.group_numbers
+
+.. autofunction:: is_nilpotent_number
+
+.. autofunction:: is_abelian_number
+
+.. autofunction:: is_cyclic_number
\ No newline at end of file
diff --git a/doc/src/modules/combinatorics/index.rst b/doc/src/modules/combinatorics/index.rst
index 61dcea2067a8..a547c3a31e3d 100644
--- a/doc/src/modules/combinatorics/index.rst
+++ b/doc/src/modules/combinatorics/index.rst
@@ -18,6 +18,7 @@ Contents
subsets.rst
graycode.rst
named_groups.rst
+ group_numbers.rst
util.rst
group_constructs.rst
testutil.rst
| [
{
"components": [
{
"doc": "Check whether `n` is a nilpotent number. A number `n` is said to be\nnilpotent if and only if every finite group of order `n` is nilpotent.\nFor more information see [1]_.\n\nExamples\n========\n\n>>> from sympy.combinatorics.group_numbers import is_nilpotent_number\n>>... | [
"test_is_nilpotent_number",
"test_is_abelian_number"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added methods for nilpotent, abelian and cyclic numbers.
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
#### Brief description of what is fixed or changed
Implemented methods to check if a number is nilpotent, abelian or cyclic. Details about the algorithm can be found in
[this](http://www2.math.ou.edu/~shankar/papers/nil2.pdf) paper.
#### Other comments
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
* combinatorics
* Added methods for nilpotent, abelian and cyclic numbers.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/combinatorics/group_numbers.py]
(definition of is_nilpotent_number:)
def is_nilpotent_number(n):
"""Check whether `n` is a nilpotent number. A number `n` is said to be
nilpotent if and only if every finite group of order `n` is nilpotent.
For more information see [1]_.
Examples
========
>>> from sympy.combinatorics.group_numbers import is_nilpotent_number
>>> from sympy import randprime
>>> is_nilpotent_number(21)
False
>>> is_nilpotent_number(randprime(1, 30)**12)
True
References
==========
.. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*,
The American Mathematical Monthly, 107(7), 631-634."""
(definition of is_abelian_number:)
def is_abelian_number(n):
"""Check whether `n` is an abelian number. A number `n` is said to be abelian
if and only if every finite group of order `n` is abelian. For more
information see [1]_.
Examples
========
>>> from sympy.combinatorics.group_numbers import is_abelian_number
>>> from sympy import randprime
>>> is_abelian_number(4)
True
>>> is_abelian_number(randprime(1, 2000)**2)
True
>>> is_abelian_number(60)
False
References
==========
.. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*,
The American Mathematical Monthly, 107(7), 631-634."""
(definition of is_cyclic_number:)
def is_cyclic_number(n):
"""Check whether `n` is a cyclic number. A number `n` is said to be cyclic
if and only if every finite group of order `n` is cyclic. For more
information see [1]_.
Examples
========
>>> from sympy.combinatorics.group_numbers import is_cyclic_number
>>> from sympy import randprime
>>> is_cyclic_number(15)
True
>>> is_cyclic_number(randprime(1, 2000)**2)
False
>>> is_cyclic_number(4)
False
References
==========
.. [1] Pakianathan, J., Shankar, K., *Nilpotent Numbers*,
The American Mathematical Monthly, 107(7), 631-634."""
[end of new definitions in sympy/combinatorics/group_numbers.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | edf24253833ca153cb6d29ae54092ecebe29614c | |
astropy__astropy-13076 | 13,076 | astropy/astropy | 5.0 | 2ae987ba1160486e36eb315ee0e9d85cdbea5844 | 2022-04-07T05:36:36Z | diff --git a/astropy/cosmology/core.py b/astropy/cosmology/core.py
index de4a4b0a92f4..a6b5e5daa169 100644
--- a/astropy/cosmology/core.py
+++ b/astropy/cosmology/core.py
@@ -1,7 +1,10 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import annotations
+
import abc
import inspect
+from typing import Optional, Set, Type, TypeVar
import numpy as np
@@ -23,9 +26,19 @@
__doctest_requires__ = {} # needed until __getattr__ removed
+
+##############################################################################
+# Parameters
+
# registry of cosmology classes with {key=name : value=class}
_COSMOLOGY_CLASSES = dict()
+# typing
+_CosmoT = TypeVar("_CosmoT", bound="Cosmology")
+_FlatCosmoT = TypeVar("_FlatCosmoT", bound="FlatCosmologyMixin")
+
+##############################################################################
+
class CosmologyError(Exception):
pass
@@ -375,11 +388,74 @@ class FlatCosmologyMixin(metaclass=abc.ABCMeta):
but ``FlatLambdaCDM`` **will** be flat.
"""
+ def __init_subclass__(cls: Type[_FlatCosmoT]) -> None:
+ super().__init_subclass__()
+
+ # Determine the non-flat class.
+ # This will raise a TypeError if the MRO is inconsistent.
+ cls._nonflat_cls_
+
+ # ===============================================================
+
+ @classmethod # TODO! make metaclass-method
+ def _get_nonflat_cls(cls, kls: Optional[Type[_CosmoT]]=None) -> Optional[Type[Cosmology]]:
+ """Find the corresponding non-flat class.
+
+ The class' bases are searched recursively.
+
+ Parameters
+ ----------
+ kls : :class:`astropy.cosmology.Cosmology` class or None, optional
+ If `None` (default) this class is searched instead of `kls`.
+
+ Raises
+ ------
+ TypeError
+ If more than one non-flat class is found at the same level of the
+ inheritance. This is similar to the error normally raised by Python
+ for an inconsistent method resolution order.
+
+ Returns
+ -------
+ type
+ A :class:`Cosmology` subclass this class inherits from that is not a
+ :class:`FlatCosmologyMixin` subclass.
+ """
+ _kls = cls if kls is None else kls
+
+ # Find non-flat classes
+ nonflat: Set[Type[Cosmology]]
+ nonflat = {b for b in _kls.__bases__
+ if issubclass(b, Cosmology) and not issubclass(b, FlatCosmologyMixin)}
+
+ if not nonflat: # e.g. subclassing FlatLambdaCDM
+ nonflat = {k for b in _kls.__bases__ if (k := cls._get_nonflat_cls(b)) is not None}
+
+ if len(nonflat) > 1:
+ raise TypeError(
+ f"cannot create a consistent non-flat class resolution order "
+ f"for {_kls} with bases {nonflat} at the same inheritance level."
+ )
+ if not nonflat: # e.g. FlatFLRWMixin(FlatCosmologyMixin)
+ return None
+
+ return nonflat.pop()
+
+ _nonflat_cls_ = classproperty(_get_nonflat_cls, lazy=True,
+ doc="Return the corresponding non-flat class.")
+
+ # ===============================================================
+
@property
def is_flat(self):
"""Return `True`, the cosmology is flat."""
return True
+ @property
+ @abc.abstractmethod
+ def equivalent_nonflat(self: _FlatCosmoT) -> _CosmoT:
+ """Return the equivalent non-flat-class instance of this cosmology."""
+
# -----------------------------------------------------------------------------
diff --git a/astropy/cosmology/flrw/base.py b/astropy/cosmology/flrw/base.py
index 6fc7c4d4070d..1b3b46611023 100644
--- a/astropy/cosmology/flrw/base.py
+++ b/astropy/cosmology/flrw/base.py
@@ -1,9 +1,12 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
+from __future__ import annotations
+
import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number
+from typing import TypeVar
import numpy as np
from numpy import inf, sin
@@ -14,6 +17,7 @@
from astropy.cosmology.parameter import Parameter, _validate_non_negative, _validate_with_unit
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
+from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
# isort: split
@@ -29,6 +33,9 @@ def quad(*args, **kwargs):
__doctest_requires__ = {'*': ['scipy']}
+##############################################################################
+# Parameters
+
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
_H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
@@ -44,6 +51,13 @@ def quad(*args, **kwargs):
_kB_evK = const.k_B.to(u.eV / u.K)
+# typing
+_FLRWT = TypeVar("_FLRWT", bound="FLRW")
+_FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin")
+
+##############################################################################
+
+
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
@@ -1422,6 +1436,20 @@ def __init__(self, *args, **kw):
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
+ @lazyproperty
+ def equivalent_nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
+ # Create BoundArgument to handle args versus kwargs.
+ # This also handles all errors from mismatched arguments
+ ba = self._nonflat_cls_._init_signature.bind_partial(**self._init_arguments,
+ Ode0=self.Ode0)
+ # Make new instance, respecting args vs kwargs
+ inst = self._nonflat_cls_(*ba.args, **ba.kwargs)
+ # Because of machine precision, make sure parameters exactly match
+ for n in inst.__all_parameters__ + ("Ok0", ):
+ setattr(inst, "_" + n, getattr(self, n))
+
+ return inst
+
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
diff --git a/astropy/cosmology/flrw/lambdacdm.py b/astropy/cosmology/flrw/lambdacdm.py
index 8969f407c742..e11c29aa98c1 100644
--- a/astropy/cosmology/flrw/lambdacdm.py
+++ b/astropy/cosmology/flrw/lambdacdm.py
@@ -603,6 +603,12 @@ class FlatLambdaCDM(FlatFLRWMixin, LambdaCDM):
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
+
+ To get an equivalent cosmology, but of type `astropy.cosmology.LambdaCDM`,
+ use :attr:`astropy.cosmology.FlatFLRWMixin.equivalent_nonflat`.
+
+ >>> cosmo.equivalent_nonflat
+ LambdaCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
"""
def __init__(self, H0, Om0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
diff --git a/astropy/cosmology/flrw/w0cdm.py b/astropy/cosmology/flrw/w0cdm.py
index eea77c32c090..aeb877978f1c 100644
--- a/astropy/cosmology/flrw/w0cdm.py
+++ b/astropy/cosmology/flrw/w0cdm.py
@@ -253,6 +253,12 @@ class FlatwCDM(FlatFLRWMixin, wCDM):
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
+
+ To get an equivalent cosmology, but of type `astropy.cosmology.wCDM`,
+ use :attr:`astropy.cosmology.FlatFLRWMixin.equivalent_nonflat`.
+
+ >>> cosmo.equivalent_nonflat
+ wCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
"""
def __init__(self, H0, Om0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
diff --git a/astropy/cosmology/flrw/w0wacdm.py b/astropy/cosmology/flrw/w0wacdm.py
index 571cc2dad50d..b1a3a68344e1 100644
--- a/astropy/cosmology/flrw/w0wacdm.py
+++ b/astropy/cosmology/flrw/w0wacdm.py
@@ -236,6 +236,12 @@ class Flatw0waCDM(FlatFLRWMixin, w0waCDM):
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
+ To get an equivalent cosmology, but of type `astropy.cosmology.w0waCDM`,
+ use :attr:`astropy.cosmology.FlatFLRWMixin.equivalent_nonflat`.
+
+ >>> cosmo.equivalent_nonflat
+ w0waCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
+
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
diff --git a/docs/changes/cosmology/13076.feature.rst b/docs/changes/cosmology/13076.feature.rst
new file mode 100644
index 000000000000..cc79f44bbfe5
--- /dev/null
+++ b/docs/changes/cosmology/13076.feature.rst
@@ -0,0 +1,3 @@
+A new property ``equivalent_nonflat`` has been added to flat cosmologies
+(``FlatCosmologyMixin`` subclasses) to get an equivalent cosmology, but of the
+corresponding non-flat class.
diff --git a/docs/whatsnew/5.1.rst b/docs/whatsnew/5.1.rst
index 465a6e1082ae..282a0ec0828d 100644
--- a/docs/whatsnew/5.1.rst
+++ b/docs/whatsnew/5.1.rst
@@ -42,6 +42,14 @@ The list of valid formats, e.g. the Table in this example, may be
checked with ``Cosmology.from_format.list_formats()``
+A new property ``equivalent_nonflat`` has been added to flat cosmologies
+(``FlatCosmologyMixin`` subclasses) to get an equivalent cosmology, but of the
+corresponding non-flat class.
+
+ >>> Planck18.equivalent_nonflat
+ LambdaCDM(name="Planck18", ...
+
+
.. _whatsnew-doppler-redshift-eq:
``doppler_redshift`` equivalency
| diff --git a/astropy/cosmology/tests/test_core.py b/astropy/cosmology/tests/test_core.py
index 8d75e58507db..62daa98e8e7f 100644
--- a/astropy/cosmology/tests/test_core.py
+++ b/astropy/cosmology/tests/test_core.py
@@ -17,7 +17,7 @@
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
-from astropy.cosmology import Cosmology
+from astropy.cosmology import Cosmology, CosmologyError, FlatCosmologyMixin
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
@@ -400,6 +400,19 @@ class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
+ def test_nonflat_class_(self, cosmo_cls, cosmo):
+ """Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`.
+ """
+ # Test it's a method on the class
+ assert issubclass(cosmo_cls, cosmo_cls._nonflat_cls_)
+
+ # It also works from the instance. # TODO! as a "metaclassmethod"
+ assert issubclass(cosmo_cls, cosmo._nonflat_cls_)
+
+ # Maybe not the most robust test, but so far all Flat classes have the
+ # name of their parent class.
+ assert cosmo._nonflat_cls_.__name__ in cosmo_cls.__name__
+
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
@@ -407,6 +420,12 @@ def test_is_flat(self, cosmo_cls, cosmo):
# it's always True
assert cosmo.is_flat is True
+ def test_equivalent_nonflat(self, cosmo):
+ """Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.equivalent_nonflat`.
+ """
+ assert cosmo.equivalent_nonflat.is_equivalent(cosmo)
+ assert cosmo.is_equivalent(cosmo.equivalent_nonflat)
+
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
@@ -417,6 +436,60 @@ def test_is_equivalent(self, cosmo):
"""
CosmologySubclassTest.test_is_equivalent(self, cosmo)
+ # ===============================================================
+ # Usage Tests
+
+ def test_subclassing(self, cosmo_cls):
+ """Test when subclassing a flat cosmology."""
+
+ class SubClass1(cosmo_cls):
+ pass
+
+ # The classes have the same non-flat parent class
+ assert SubClass1._nonflat_cls_ is cosmo_cls._nonflat_cls_
+
+ # A more complex example is when Mixin classes are used.
+ class Mixin:
+ pass
+
+ class SubClass2(Mixin, cosmo_cls):
+ pass
+
+ # The classes have the same non-flat parent class
+ assert SubClass2._nonflat_cls_ is cosmo_cls._nonflat_cls_
+
+ # The order of the Mixin should not matter
+ class SubClass3(cosmo_cls, Mixin):
+ pass
+
+ # The classes have the same non-flat parent class
+ assert SubClass3._nonflat_cls_ is cosmo_cls._nonflat_cls_
+
+
+def test_nonflat_cls_multiple_nonflat_inheritance():
+ """
+ Test :meth:`astropy.cosmology.core.FlatCosmologyMixin._nonflat_cls_`
+ when there's more than one non-flat class in the inheritance.
+ """
+ # Define a non-operable minimal subclass of Cosmology.
+ class SubCosmology2(Cosmology):
+
+ def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None):
+ super().__init__(name=name, meta=meta)
+
+ @property
+ def is_flat(self):
+ return False
+
+ # Now make an ambiguous flat cosmology from the two SubCosmologies
+ with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
+
+ class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
+
+ @property
+ def equivalent_nonflat(self):
+ pass
+
# -----------------------------------------------------------------------------
| diff --git a/docs/changes/cosmology/13076.feature.rst b/docs/changes/cosmology/13076.feature.rst
new file mode 100644
index 000000000000..cc79f44bbfe5
--- /dev/null
+++ b/docs/changes/cosmology/13076.feature.rst
@@ -0,0 +1,3 @@
+A new property ``equivalent_nonflat`` has been added to flat cosmologies
+(``FlatCosmologyMixin`` subclasses) to get an equivalent cosmology, but of the
+corresponding non-flat class.
diff --git a/docs/whatsnew/5.1.rst b/docs/whatsnew/5.1.rst
index 465a6e1082ae..282a0ec0828d 100644
--- a/docs/whatsnew/5.1.rst
+++ b/docs/whatsnew/5.1.rst
@@ -42,6 +42,14 @@ The list of valid formats, e.g. the Table in this example, may be
checked with ``Cosmology.from_format.list_formats()``
+A new property ``equivalent_nonflat`` has been added to flat cosmologies
+(``FlatCosmologyMixin`` subclasses) to get an equivalent cosmology, but of the
+corresponding non-flat class.
+
+ >>> Planck18.equivalent_nonflat
+ LambdaCDM(name="Planck18", ...
+
+
.. _whatsnew-doppler-redshift-eq:
``doppler_redshift`` equivalency
| [
{
"components": [
{
"doc": "",
"lines": [
391,
396
],
"name": "FlatCosmologyMixin.__init_subclass__",
"signature": "def __init_subclass__(cls: Type[_FlatCosmoT]) -> None:",
"type": "function"
},
{
"doc": "Find the corr... | [
"astropy/cosmology/tests/test_core.py::test_nonflat_cls_multiple_nonflat_inheritance"
] | [
"astropy/cosmology/tests/test_core.py::TestCosmology::test_to_table_bad_index",
"astropy/cosmology/tests/test_core.py::TestCosmology::test_to_table_failed_cls",
"astropy/cosmology/tests/test_core.py::TestCosmology::test_to_table_cls[QTable]",
"astropy/cosmology/tests/test_core.py::TestCosmology::test_to_table... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add property `equivalent_nonflat` to flat cosmologies
Signed-off-by: Nathaniel Starkman (@nstarman) <nstarkman@protonmail.com>
A new property ``equivalent_nonflat`` has been added to ``FlatCosmologyMixin``
to get an equivalent cosmology, but of the corresponding non-flat class.
### Checklist for package maintainer(s)
<!-- This section is to be filled by package maintainer(s) who will
review this pull request. -->
This checklist is meant to remind the package maintainer(s) who will review this pull request of some common things to look for. This list is not exhaustive.
- [x] Do the proposed changes actually accomplish desired goals?
- [x] Do the proposed changes follow the [Astropy coding guidelines](https://docs.astropy.org/en/latest/development/codeguide.html)?
- [x] Are tests added/updated as required? If so, do they follow the [Astropy testing guidelines](https://docs.astropy.org/en/latest/development/testguide.html)?
- [ ] Are docs added/updated as required? If so, do they follow the [Astropy documentation guidelines](https://docs.astropy.org/en/latest/development/docguide.html#astropy-documentation-rules-and-guidelines)?
- [x] Is rebase and/or squash necessary? If so, please provide the author with appropriate instructions. Also see ["When to rebase and squash commits"](https://docs.astropy.org/en/latest/development/when_to_rebase.html).
- [x] Did the CI pass? If no, are the failures related? If you need to run daily and weekly cron jobs as part of the PR, please apply the `Extra CI` label.
- [x] Is a change log needed? If yes, did the change log check pass? If no, add the `no-changelog-entry-needed` label. If this is a manual backport, use the `skip-changelog-checks` label unless special changelog handling is necessary.
- [x] Is this a big PR that makes a "What's new?" entry worthwhile and if so, is (1) a "what's new" entry included in this PR and (2) the "whatsnew-needed" label applied?
- [x] Is a milestone set? Milestone must be set but `astropy-bot` check might be missing; do not let the green checkmark fool you.
- [x] At the time of adding the milestone, if the milestone set requires a backport to release branch(es), apply the appropriate `backport-X.Y.x` label(s) *before* merge.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in astropy/cosmology/core.py]
(definition of FlatCosmologyMixin.__init_subclass__:)
def __init_subclass__(cls: Type[_FlatCosmoT]) -> None:
(definition of FlatCosmologyMixin._get_nonflat_cls:)
def _get_nonflat_cls(cls, kls: Optional[Type[_CosmoT]]=None) -> Optional[Type[Cosmology]]:
"""Find the corresponding non-flat class.
The class' bases are searched recursively.
Parameters
----------
kls : :class:`astropy.cosmology.Cosmology` class or None, optional
If `None` (default) this class is searched instead of `kls`.
Raises
------
TypeError
If more than one non-flat class is found at the same level of the
inheritance. This is similar to the error normally raised by Python
for an inconsistent method resolution order.
Returns
-------
type
A :class:`Cosmology` subclass this class inherits from that is not a
:class:`FlatCosmologyMixin` subclass."""
(definition of FlatCosmologyMixin.equivalent_nonflat:)
def equivalent_nonflat(self: _FlatCosmoT) -> _CosmoT:
"""Return the equivalent non-flat-class instance of this cosmology."""
[end of new definitions in astropy/cosmology/core.py]
[start of new definitions in astropy/cosmology/flrw/base.py]
(definition of FlatFLRWMixin.equivalent_nonflat:)
def equivalent_nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
[end of new definitions in astropy/cosmology/flrw/base.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 7cbba866a8c5749b90a5cb4f9877ddfad2d36037 | |
astropy__astropy-13075 | 13,075 | astropy/astropy | 5.0 | c660b079b6472920662ca4a0c731751a0342448c | 2022-04-06T19:44:23Z | diff --git a/astropy/cosmology/io/__init__.py b/astropy/cosmology/io/__init__.py
index c1dc9d4a9919..30f0469fa1f7 100644
--- a/astropy/cosmology/io/__init__.py
+++ b/astropy/cosmology/io/__init__.py
@@ -5,4 +5,4 @@
"""
# Import to register with the I/O machinery
-from . import cosmology, ecsv, mapping, model, row, table, yaml
+from . import cosmology, ecsv, html, mapping, model, row, table, yaml # noqa: F401
diff --git a/astropy/cosmology/io/html.py b/astropy/cosmology/io/html.py
new file mode 100644
index 000000000000..f751d306a3f1
--- /dev/null
+++ b/astropy/cosmology/io/html.py
@@ -0,0 +1,189 @@
+import astropy.cosmology.units as cu
+import astropy.units as u
+from astropy.cosmology.connect import readwrite_registry
+from astropy.cosmology.core import Cosmology
+from astropy.cosmology.parameter import Parameter
+from astropy.table import QTable
+
+from .table import from_table, to_table
+
+# Format look-up for conversion, {original_name: new_name}
+# TODO! move this information into the Parameters themselves
+_FORMAT_TABLE = {
+ "H0": "$$H_{0}$$",
+ "Om0": "$$\\Omega_{m,0}$$",
+ "Ode0": "$$\\Omega_{\\Lambda,0}$$",
+ "Tcmb0": "$$T_{0}$$",
+ "Neff": "$$N_{eff}$$",
+ "m_nu": "$$m_{nu}$$",
+ "Ob0": "$$\\Omega_{b,0}$$",
+ "w0": "$$w_{0}$$",
+ "wa": "$$w_{a}$$",
+ "wz": "$$w_{z}$$",
+ "wp": "$$w_{p}$$",
+ "zp": "$$z_{p}$$",
+}
+
+
+def read_html_table(filename, index=None, *, move_to_meta=False, cosmology=None, latex_names=True, **kwargs):
+ """Read a |Cosmology| from an HTML file.
+
+ Parameters
+ ----------
+ filename : path-like or file-like
+ From where to read the Cosmology.
+ index : int or str or None, optional
+ Needed to select the row in tables with multiple rows. ``index`` can be
+ an integer for the row number or, if the table is indexed by a column,
+ the value of that column. If the table is not indexed and ``index`` is a
+ string, the "name" column is used as the indexing column.
+
+ move_to_meta : bool, optional keyword-only
+ Whether to move keyword arguments that are not in the Cosmology class'
+ signature to the Cosmology's metadata. This will only be applied if the
+ Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
+ Arguments moved to the metadata will be merged with existing metadata,
+ preferring specified metadata in the case of a merge conflict (e.g. for
+ ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be
+ ``{'key': 10}``).
+ cosmology : str or |Cosmology| class or None, optional keyword-only
+ The cosmology class (or string name thereof) to use when constructing
+ the cosmology instance. The class also provides default parameter
+ values, filling in any non-mandatory arguments missing in 'table'.
+ latex_names : bool, optional keyword-only
+ Whether the |Table| (might) have latex column names for the parameters
+ that need to be mapped to the correct parameter name -- e.g. $$H_{0}$$
+ to 'H0'. This is `True` by default, but can be turned off (set to
+ `False`) if there is a known name conflict (e.g. both an 'H0' and
+ '$$H_{0}$$' column) as this will raise an error. In this case, the
+ correct name ('H0') is preferred.
+ **kwargs : Any
+ Passed to :attr:`astropy.table.QTable.read`. ``format`` is set to
+ 'ascii.html', regardless of input.
+
+ Returns
+ -------
+ |Cosmology| subclass instance
+
+ Raises
+ ------
+ ValueError
+ If the keyword argument 'format' is given and is not "ascii.html".
+ """
+ # Check that the format is 'ascii.html' (or not specified)
+ format = kwargs.pop("format", "ascii.html")
+ if format != "ascii.html":
+ raise ValueError(f"format must be 'ascii.html', not {format}")
+
+ # Reading is handled by `QTable`.
+ with u.add_enabled_units(cu): # (cosmology units not turned on by default)
+ table = QTable.read(filename, format="ascii.html", **kwargs)
+
+ # Need to map the table's column names to Cosmology inputs (parameter
+ # names).
+ # TODO! move the `latex_names` into `from_table`
+ if latex_names:
+ table_columns = set(table.colnames)
+ for name, latex in _FORMAT_TABLE.items():
+ if latex in table_columns:
+ table.rename_column(latex, name)
+
+ # Build the cosmology from table, using the private backend.
+ return from_table(table, index=index, move_to_meta=move_to_meta, cosmology=cosmology)
+
+
+def write_html_table(cosmology, file, *, overwrite=False, cls=QTable, latex_names=False, **kwargs):
+ r"""Serialize the |Cosmology| into a HTML table.
+
+ Parameters
+ ----------
+ cosmology : |Cosmology| subclass instance file : path-like or file-like
+ Location to save the serialized cosmology.
+ file : path-like or file-like
+ Where to write the html table.
+
+ overwrite : bool, optional keyword-only
+ Whether to overwrite the file, if it exists.
+ cls : |Table| class, optional keyword-only
+ Astropy |Table| (sub)class to use when writing. Default is |QTable|
+ class.
+ latex_names : bool, optional keyword-only
+ Whether to format the parameters (column) names to latex -- e.g. 'H0' to
+ $$H_{0}$$.
+ **kwargs : Any
+ Passed to ``cls.write``.
+
+ Raises
+ ------
+ TypeError
+ If the optional keyword-argument 'cls' is not a subclass of |Table|.
+ ValueError
+ If the keyword argument 'format' is given and is not "ascii.html".
+
+ Notes
+ -----
+ A HTML file containing a Cosmology HTML table should have scripts enabling
+ MathJax.
+
+ ::
+ <script
+ src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
+ <script type="text/javascript" id="MathJax-script" async
+ src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
+ </script>
+ """
+ # Check that the format is 'ascii.html' (or not specified)
+ format = kwargs.pop("format", "ascii.html")
+ if format != "ascii.html":
+ raise ValueError(f"format must be 'ascii.html', not {format}")
+
+ # Set cosmology_in_meta as false for now since there is no metadata being kept
+ table = to_table(cosmology, cls=cls, cosmology_in_meta=False)
+
+ cosmo_cls = type(cosmology)
+ for name, col in table.columns.items():
+ param = getattr(cosmo_cls, name, None)
+ if not isinstance(param, Parameter) or param.unit in (None, u.one):
+ continue
+ # Replace column with unitless version
+ table.replace_column(name, (col << param.unit).value, copy=False)
+
+ # TODO! move the `latex_names` into `to_table`
+ if latex_names:
+ new_names = [_FORMAT_TABLE.get(k, k) for k in cosmology.__parameters__]
+ table.rename_columns(cosmology.__parameters__, new_names)
+
+ # Write HTML, using table I/O
+ table.write(file, overwrite=overwrite, format="ascii.html", **kwargs)
+
+
+def html_identify(origin, filepath, fileobj, *args, **kwargs):
+ """Identify if an object uses the HTML Table format.
+
+ Parameters
+ ----------
+ origin : Any
+ Not used.
+ filepath : str or Any
+ From where to read the Cosmology.
+ fileobj : Any
+ Not used.
+ *args : Any
+ Not used.
+ **kwargs : Any
+ Not used.
+
+ Returns
+ -------
+ bool
+ If the filepath is a string ending with '.html'.
+ """
+ return isinstance(filepath, str) and filepath.endswith(".html")
+
+
+# ===================================================================
+# Register
+
+readwrite_registry.register_reader("ascii.html", Cosmology, read_html_table)
+readwrite_registry.register_writer("ascii.html", Cosmology, write_html_table)
+readwrite_registry.register_identifier("ascii.html", Cosmology, html_identify)
diff --git a/docs/changes/cosmology/13075.feature.rst b/docs/changes/cosmology/13075.feature.rst
new file mode 100644
index 000000000000..e6206f0aa0ea
--- /dev/null
+++ b/docs/changes/cosmology/13075.feature.rst
@@ -0,0 +1,2 @@
+Cosmology instance can be parsed from or converted to a HTML table using
+the new HTML methods in Cosmology's ``to/from_format`` I/O.
diff --git a/docs/cosmology/io.rst b/docs/cosmology/io.rst
index ee93245bfd3e..e4da4b822469 100644
--- a/docs/cosmology/io.rst
+++ b/docs/cosmology/io.rst
@@ -33,9 +33,9 @@ Getting Started
The |Cosmology| class includes two methods, |Cosmology.read| and
|Cosmology.write|, that make it possible to read from and write to files.
-Currently the only registered ``read`` / ``write`` format is "ascii.ecsv",
-like for Table. Also, custom ``read`` / ``write`` formats may be registered
-into the Astropy Cosmology I/O framework.
+The registered ``read`` / ``write`` formats include "ascii.ecsv" and
+"ascii.html", like for Table. Also, custom ``read`` / ``write`` formats may be
+registered into the Astropy Cosmology I/O framework.
Writing a cosmology instance requires only the file location and optionally,
if the file format cannot be inferred, a keyword argument "format". Additional
@@ -66,6 +66,7 @@ To see a list of the available read/write file formats:
Format Read Write Auto-identify
---------- ---- ----- -------------
ascii.ecsv Yes Yes Yes
+ ascii.html Yes Yes Yes
This list will include both built-in and registered 3rd-party formats.
diff --git a/docs/whatsnew/5.2.rst b/docs/whatsnew/5.2.rst
index 312615d920a0..252aa79f7860 100644
--- a/docs/whatsnew/5.2.rst
+++ b/docs/whatsnew/5.2.rst
@@ -51,6 +51,23 @@ cosmologies with their non-flat equivalents.
True
+A cosmology can be parsed from or converted to a HTML table using
+the new HTML methods in Cosmology's ``to/from_format`` I/O.
+
+ >>> from astropy.cosmology import Planck18
+ >>> Planck18.write("planck18.html")
+
+The columns can be latex/mathjax formatted using the flag ``latex_names=True``;
+then if the following is added to the file's header, the column names will
+render nicely.::
+
+ <script
+ src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
+ <script type="text/javascript" id="MathJax-script" async
+ src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
+ </script>
+
+
.. _whatsnew-5.2-coordinates:
Topocentric ITRS Frame
| diff --git a/astropy/cosmology/io/tests/test_.py b/astropy/cosmology/io/tests/test_.py
index b43618845e24..1ab029abc73f 100644
--- a/astropy/cosmology/io/tests/test_.py
+++ b/astropy/cosmology/io/tests/test_.py
@@ -12,7 +12,7 @@ def test_expected_readwrite_io():
"""Test that ONLY the expected I/O is registered."""
got = {k for k, _ in readwrite_registry._readers.keys()}
- expected = {"ascii.ecsv"}
+ expected = {"ascii.ecsv", "ascii.html"}
assert got == expected
diff --git a/astropy/cosmology/io/tests/test_html.py b/astropy/cosmology/io/tests/test_html.py
new file mode 100644
index 000000000000..b0f830559d7b
--- /dev/null
+++ b/astropy/cosmology/io/tests/test_html.py
@@ -0,0 +1,256 @@
+# Licensed under a 3-clause BSD style license - see LICENSE.rst
+
+# THIRD PARTY
+import pytest
+
+import astropy.units as u
+from astropy.cosmology.io.html import _FORMAT_TABLE, read_html_table, write_html_table
+from astropy.cosmology.parameter import Parameter
+from astropy.table import QTable, Table, vstack
+from astropy.units.decorators import NoneType
+from astropy.utils.compat.optional_deps import HAS_BS4
+
+from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
+
+###############################################################################
+
+
+class ReadWriteHTMLTestMixin(ReadWriteTestMixinBase):
+ """
+ Tests for a Cosmology[Read/Write] with ``format="ascii.html"``.
+ This class will not be directly called by :mod:`pytest` since its name does
+ not begin with ``Test``. To activate the contained tests this class must
+ be inherited in a subclass. Subclasses must dfine a :func:`pytest.fixture`
+ ``cosmo`` that returns/yields an instance of a |Cosmology|.
+ See ``TestCosmology`` for an example.
+ """
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_to_html_table_bad_index(self, read, write, tmp_path):
+ """Test if argument ``index`` is incorrect"""
+ fp = tmp_path / "test_to_html_table_bad_index.html"
+
+ write(fp, format="ascii.html")
+
+ # single-row table and has a non-0/None index
+ with pytest.raises(IndexError, match="index 2 out of range"):
+ read(fp, index=2, format="ascii.html")
+
+ # string index where doesn't match
+ with pytest.raises(KeyError, match="No matches found for key"):
+ read(fp, index="row 0", format="ascii.html")
+
+ # -----------------------
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_to_html_table_failed_cls(self, write, tmp_path):
+ """Test failed table type."""
+ fp = tmp_path / "test_to_html_table_failed_cls.html"
+
+ with pytest.raises(TypeError, match="'cls' must be"):
+ write(fp, format='ascii.html', cls=list)
+
+ @pytest.mark.parametrize("tbl_cls", [QTable, Table])
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_to_html_table_cls(self, write, tbl_cls, tmp_path):
+ fp = tmp_path / "test_to_html_table_cls.html"
+ write(fp, format='ascii.html', cls=tbl_cls)
+
+ # -----------------------
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_readwrite_html_table_instance(self, cosmo_cls, cosmo, read, write, tmp_path, add_cu):
+ """Test cosmology -> ascii.html -> cosmology."""
+ fp = tmp_path / "test_readwrite_html_table_instance.html"
+
+ # ------------
+ # To Table
+
+ write(fp, format="ascii.html")
+
+ # some checks on the saved file
+ tbl = QTable.read(fp)
+ # assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ # metadata read not implemented
+ assert tbl["name"] == cosmo.name
+
+ # ------------
+ # From Table
+
+ tbl["mismatching"] = "will error"
+ tbl.write(fp, format="ascii.html", overwrite=True)
+
+ # tests are different if the last argument is a **kwarg
+ if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
+ got = read(fp, format="ascii.html")
+
+ assert got.__class__ is cosmo_cls
+ assert got.name == cosmo.name
+ # assert "mismatching" not in got.meta # metadata read not implemented
+
+ return # don't continue testing
+
+ # read with mismatching parameters errors
+ with pytest.raises(TypeError, match="there are unused parameters"):
+ read(fp, format="ascii.html")
+
+ # unless mismatched are moved to meta
+ got = read(fp, format="ascii.html", move_to_meta=True)
+ assert got == cosmo
+ # assert got.meta["mismatching"] == "will error" # metadata read not implemented
+
+ # it won't error if everything matches up
+ tbl.remove_column("mismatching")
+ tbl.write(fp, format="ascii.html", overwrite=True)
+ got = read(fp, format="ascii.html")
+ assert got == cosmo
+
+ # and it will also work if the cosmology is a class
+ # Note this is not the default output of ``write``.
+ # tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] #
+ # metadata read not implemented
+ got = read(fp, format="ascii.html")
+ assert got == cosmo
+
+ got = read(fp)
+ assert got == cosmo
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_rename_html_table_columns(self, read, write, tmp_path):
+ """Tests renaming columns"""
+ fp = tmp_path / "test_rename_html_table_columns.html"
+
+ write(fp, format="ascii.html", latex_names=True)
+
+ tbl = QTable.read(fp)
+
+ # asserts each column name has not been reverted yet
+ # For now, Cosmology class and name are stored in first 2 slots
+ for column_name in tbl.colnames[2:]:
+ assert column_name in _FORMAT_TABLE.values()
+
+ cosmo = read(fp, format="ascii.html")
+ converted_tbl = cosmo.to_format("astropy.table")
+
+ # asserts each column name has been reverted
+ # cosmology name is still stored in first slot
+ for column_name in converted_tbl.colnames[1:]:
+ assert column_name in _FORMAT_TABLE.keys()
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ @pytest.mark.parametrize("latex_names", [True, False])
+ def test_readwrite_html_subclass_partial_info(self, cosmo_cls, cosmo, read,
+ write, latex_names, tmp_path, add_cu):
+ """
+ Test writing from an instance and reading from that class.
+ This works with missing information.
+ """
+ fp = tmp_path / "test_read_html_subclass_partial_info.html"
+
+ # test write
+ write(fp, format="ascii.html", latex_names=latex_names)
+
+ # partial information
+ tbl = QTable.read(fp)
+
+ # tbl.meta.pop("cosmology", None) # metadata not implemented
+ cname = "$$T_{0}$$" if latex_names else "Tcmb0"
+ del tbl[cname] # format is not converted to original units
+ tbl.write(fp, overwrite=True)
+
+ # read with the same class that wrote fills in the missing info with
+ # the default value
+ got = cosmo_cls.read(fp, format="ascii.html")
+ got2 = read(fp, format="ascii.html", cosmology=cosmo_cls)
+ got3 = read(fp, format="ascii.html", cosmology=cosmo_cls.__qualname__)
+
+ assert (got == got2) and (got2 == got3) # internal consistency
+
+ # not equal, because Tcmb0 is changed, which also changes m_nu
+ assert got != cosmo
+ assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
+ assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
+ # but the metadata is the same
+ # assert got.meta == cosmo.meta # metadata read not implemented
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_readwrite_html_mutlirow(self, cosmo, read, write, tmp_path, add_cu):
+ """Test if table has multiple rows."""
+ fp = tmp_path / "test_readwrite_html_mutlirow.html"
+
+ # Make
+ cosmo1 = cosmo.clone(name="row 0")
+ cosmo2 = cosmo.clone(name="row 2")
+ table = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
+ metadata_conflicts='silent')
+
+ cosmo_cls = type(cosmo)
+ if cosmo_cls == NoneType:
+ assert False
+
+ for n, col in zip(table.colnames, table.itercols()):
+ if n == "cosmology":
+ continue
+ param = getattr(cosmo_cls, n)
+ if not isinstance(param, Parameter) or param.unit in (None, u.one):
+ continue
+ # Replace column with unitless version
+ table.replace_column(n, (col << param.unit).value, copy=False)
+
+ table.write(fp, format="ascii.html")
+
+ # ------------
+ # From Table
+
+ # it will error on a multi-row table
+ with pytest.raises(ValueError, match="need to select a specific row"):
+ read(fp, format="ascii.html")
+
+ # unless the index argument is provided
+ got = cosmo_cls.read(fp, index=1, format="ascii.html")
+ # got = read(fp, index=1, format="ascii.html")
+ assert got == cosmo
+
+ # the index can be a string
+ got = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
+ assert got == cosmo
+
+ # it's better if the table already has an index
+ # this will be identical to the previous ``got``
+ table.add_index("name")
+ got2 = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
+ assert got2 == cosmo
+
+
+class TestReadWriteHTML(ReadWriteDirectTestBase, ReadWriteHTMLTestMixin):
+ """
+ Directly test ``read/write_html``.
+ These are not public API and are discouraged from use, in favor of
+ ``Cosmology.read/write(..., format="ascii.html")``, but should be
+ tested regardless b/c they are used internally.
+ """
+
+ def setup_class(self):
+ self.functions = {"read": read_html_table, "write": write_html_table}
+
+ @pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
+ def test_rename_direct_html_table_columns(self, read, write, tmp_path):
+ """Tests renaming columns"""
+
+ fp = tmp_path / "test_rename_html_table_columns.html"
+
+ write(fp, format="ascii.html", latex_names=True)
+
+ tbl = QTable.read(fp)
+
+ # asserts each column name has not been reverted yet
+ for column_name in tbl.colnames[2:]:
+ # for now, Cosmology as metadata and name is stored in first 2 slots
+ assert column_name in _FORMAT_TABLE.values()
+
+ cosmo = read(fp, format="ascii.html")
+ converted_tbl = cosmo.to_format("astropy.table")
+
+ # asserts each column name has been reverted
+ for column_name in converted_tbl.colnames[1:]:
+ # for now now, metadata is still stored in first slot
+ assert column_name in _FORMAT_TABLE.keys()
diff --git a/astropy/cosmology/tests/test_connect.py b/astropy/cosmology/tests/test_connect.py
index f99afe525888..7efca76b07d8 100644
--- a/astropy/cosmology/tests/test_connect.py
+++ b/astropy/cosmology/tests/test_connect.py
@@ -9,8 +9,10 @@
from astropy.cosmology import Cosmology, w0wzCDM
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.io.tests import (
- test_cosmology, test_ecsv, test_json, test_mapping, test_model, test_row, test_table, test_yaml)
+ test_cosmology, test_ecsv, test_html, test_json, test_mapping, test_model, test_row, test_table,
+ test_yaml)
from astropy.table import QTable, Row
+from astropy.utils.compat.optional_deps import HAS_BS4
###############################################################################
# SETUP
@@ -18,7 +20,13 @@
cosmo_instances = cosmology.realizations.available
# Collect the registered read/write formats.
-readwrite_formats = {"ascii.ecsv", "json"}
+# (format, supports_metadata, has_all_required_dependencies)
+readwrite_formats = {
+ ("ascii.ecsv", True, True),
+ ("ascii.html", False, HAS_BS4),
+ ("json", True, True)
+}
+
# Collect all the registered to/from formats. Unfortunately this is NOT
# automatic since the output format class is not stored on the registry.
@@ -27,10 +35,14 @@
("astropy.cosmology", Cosmology),
("astropy.row", Row), ("astropy.table", QTable)]
+
###############################################################################
-class ReadWriteTestMixin(test_ecsv.ReadWriteECSVTestMixin, test_json.ReadWriteJSONTestMixin):
+class ReadWriteTestMixin(
+ test_ecsv.ReadWriteECSVTestMixin,
+ test_html.ReadWriteHTMLTestMixin,
+ test_json.ReadWriteJSONTestMixin):
"""
Tests for a CosmologyRead/Write on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
@@ -40,14 +52,17 @@ class ReadWriteTestMixin(test_ecsv.ReadWriteECSVTestMixin, test_json.ReadWriteJS
See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples.
"""
- @pytest.mark.parametrize("format", readwrite_formats)
- def test_readwrite_complete_info(self, cosmo, tmp_path, format):
+ @pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats)
+ def test_readwrite_complete_info(self, cosmo, tmp_path, format, metaio, has_deps):
"""
Test writing from an instance and reading from the base class.
This requires full information.
The round-tripped metadata can be in a different order, so the
OrderedDict must be converted to a dict before testing equality.
"""
+ if not has_deps:
+ pytest.skip("missing a dependency")
+
fname = str(tmp_path / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
@@ -63,31 +78,35 @@ def test_readwrite_complete_info(self, cosmo, tmp_path, format):
got = Cosmology.read(fname, format=format)
assert got == cosmo
- assert dict(got.meta) == dict(cosmo.meta)
+ assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
- @pytest.mark.parametrize("format", readwrite_formats)
- def test_readwrite_from_subclass_complete_info(self, cosmo_cls, cosmo, tmp_path, format):
+ @pytest.mark.parametrize("format, metaio, has_deps", readwrite_formats)
+ def test_readwrite_from_subclass_complete_info(
+ self, cosmo_cls, cosmo, tmp_path, format, metaio, has_deps):
"""
Test writing from an instance and reading from that class, when there's
full information saved.
"""
+ if not has_deps:
+ pytest.skip("missing a dependency")
+
fname = str(tmp_path / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# read with the same class that wrote.
got = cosmo_cls.read(fname, format=format)
assert got == cosmo
- assert got.meta == cosmo.meta
+ assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
# this should be equivalent to
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls)
assert got == cosmo
- assert got.meta == cosmo.meta
+ assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
# and also
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
- assert got.meta == cosmo.meta
+ assert (not metaio) ^ (dict(got.meta) == dict(cosmo.meta))
class TestCosmologyReadWrite(ReadWriteTestMixin):
@@ -103,8 +122,11 @@ def cosmo_cls(self, cosmo):
# ==============================================================
- @pytest.mark.parametrize("format", readwrite_formats)
- def test_write_methods_have_explicit_kwarg_overwrite(self, format):
+ @pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
+ def test_write_methods_have_explicit_kwarg_overwrite(self, format, _, has_deps):
+ if not has_deps:
+ pytest.skip("missing a dependency")
+
writer = readwrite_registry.get_writer(format, Cosmology)
# test in signature
sig = inspect.signature(writer)
@@ -113,11 +135,13 @@ def test_write_methods_have_explicit_kwarg_overwrite(self, format):
# also in docstring
assert "overwrite : bool" in writer.__doc__
- @pytest.mark.parametrize("format", readwrite_formats)
- def test_readwrite_reader_class_mismatch(self, cosmo, tmp_path, format):
+ @pytest.mark.parametrize("format, _, has_deps", readwrite_formats)
+ def test_readwrite_reader_class_mismatch(self, cosmo, tmp_path, format, _, has_deps):
"""Test when the reader class doesn't match the file."""
+ if not has_deps:
+ pytest.skip("missing a dependency")
- fname = str(tmp_path / f"{cosmo.name}.{format}")
+ fname = tmp_path / f"{cosmo.name}.{format}"
cosmo.write(fname, format=format)
# class mismatch
| diff --git a/docs/changes/cosmology/13075.feature.rst b/docs/changes/cosmology/13075.feature.rst
new file mode 100644
index 000000000000..e6206f0aa0ea
--- /dev/null
+++ b/docs/changes/cosmology/13075.feature.rst
@@ -0,0 +1,2 @@
+Cosmology instance can be parsed from or converted to a HTML table using
+the new HTML methods in Cosmology's ``to/from_format`` I/O.
diff --git a/docs/cosmology/io.rst b/docs/cosmology/io.rst
index ee93245bfd3e..e4da4b822469 100644
--- a/docs/cosmology/io.rst
+++ b/docs/cosmology/io.rst
@@ -33,9 +33,9 @@ Getting Started
The |Cosmology| class includes two methods, |Cosmology.read| and
|Cosmology.write|, that make it possible to read from and write to files.
-Currently the only registered ``read`` / ``write`` format is "ascii.ecsv",
-like for Table. Also, custom ``read`` / ``write`` formats may be registered
-into the Astropy Cosmology I/O framework.
+The registered ``read`` / ``write`` formats include "ascii.ecsv" and
+"ascii.html", like for Table. Also, custom ``read`` / ``write`` formats may be
+registered into the Astropy Cosmology I/O framework.
Writing a cosmology instance requires only the file location and optionally,
if the file format cannot be inferred, a keyword argument "format". Additional
@@ -66,6 +66,7 @@ To see a list of the available read/write file formats:
Format Read Write Auto-identify
---------- ---- ----- -------------
ascii.ecsv Yes Yes Yes
+ ascii.html Yes Yes Yes
This list will include both built-in and registered 3rd-party formats.
diff --git a/docs/whatsnew/5.2.rst b/docs/whatsnew/5.2.rst
index 312615d920a0..252aa79f7860 100644
--- a/docs/whatsnew/5.2.rst
+++ b/docs/whatsnew/5.2.rst
@@ -51,6 +51,23 @@ cosmologies with their non-flat equivalents.
True
+A cosmology can be parsed from or converted to a HTML table using
+the new HTML methods in Cosmology's ``to/from_format`` I/O.
+
+ >>> from astropy.cosmology import Planck18
+ >>> Planck18.write("planck18.html")
+
+The columns can be latex/mathjax formatted using the flag ``latex_names=True``;
+then if the following is added to the file's header, the column names will
+render nicely.::
+
+ <script
+ src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
+ <script type="text/javascript" id="MathJax-script" async
+ src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
+ </script>
+
+
.. _whatsnew-5.2-coordinates:
Topocentric ITRS Frame
| [
{
"components": [
{
"doc": "Read a |Cosmology| from an HTML file.\n\nParameters\n----------\nfilename : path-like or file-like\n From where to read the Cosmology.\nindex : int or str or None, optional\n Needed to select the row in tables with multiple rows. ``index`` can be\n an integer f... | [
"astropy/cosmology/io/tests/test_.py::test_expected_readwrite_io",
"astropy/cosmology/io/tests/test_.py::test_expected_convert_io",
"astropy/cosmology/tests/test_connect.py::TestCosmologyReadWrite::test_readwrite_json_subclass_partial_info[Planck13]",
"astropy/cosmology/tests/test_connect.py::TestCosmologyRea... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Register format ``ascii.html`` to ``Cosmology.write``
<!-- This comments are hidden when you submit the pull request,
so you do not need to remove them! -->
<!-- Please be sure to check out our contributing guidelines,
https://github.com/astropy/astropy/blob/main/CONTRIBUTING.md .
Please be sure to check out our code of conduct,
https://github.com/astropy/astropy/blob/main/CODE_OF_CONDUCT.md . -->
<!-- If you are new or need to be re-acquainted with Astropy
contributing workflow, please see
http://docs.astropy.org/en/latest/development/workflow/development_workflow.html .
There is even a practical example at
https://docs.astropy.org/en/latest/development/workflow/git_edit_workflow_examples.html#astropy-fix-example . -->
<!-- Astropy coding style guidelines can be found here:
https://docs.astropy.org/en/latest/development/codeguide.html#coding-style-conventions
Our testing infrastructure enforces to follow a subset of the PEP8 to be
followed. You can check locally whether your changes have followed these by
running the following command:
tox -e codestyle
-->
<!-- Please just have a quick search on GitHub to see if a similar
pull request has already been posted.
We have old closed pull requests that might provide useful code or ideas
that directly tie in with your pull request. -->
<!-- We have several automatic features that run when a pull request is open.
They can appear daunting but do not worry because maintainers will help
you navigate them, if necessary. -->
### Description
<!-- Provide a general description of what your pull request does.
Complete the following sentence and add relevant details as you see fit. -->
This is a draft pull request for issue #12356 .
<!-- In addition please ensure that the pull request title is descriptive
and allows maintainers to infer the applicable subpackage(s). -->
<!-- READ THIS FOR MANUAL BACKPORT FROM A MAINTAINER:
Apply "skip-basebranch-check" label **before** you open the PR! -->
This draft pull requests adds two python files in astropy/cosmology/io and astropy/cosmology/io/tests .
<!-- If the pull request closes any open issues you can add this.
If you replace <Issue Number> with a number, GitHub will automatically link it.
If this pull request is unrelated to any issues, please remove
the following line. -->
Fixes #12356
### Checklist for package maintainer(s)
<!-- This section is to be filled by package maintainer(s) who will
review this pull request. -->
This checklist is meant to remind the package maintainer(s) who will review this pull request of some common things to look for. This list is not exhaustive.
- [ ] Do the proposed changes actually accomplish desired goals?
- [ ] Do the proposed changes follow the [Astropy coding guidelines](https://docs.astropy.org/en/latest/development/codeguide.html)?
- [ ] Are tests added/updated as required? If so, do they follow the [Astropy testing guidelines](https://docs.astropy.org/en/latest/development/testguide.html)?
- [ ] Are docs added/updated as required? If so, do they follow the [Astropy documentation guidelines](https://docs.astropy.org/en/latest/development/docguide.html#astropy-documentation-rules-and-guidelines)?
- [ ] Is rebase and/or squash necessary? If so, please provide the author with appropriate instructions. Also see ["When to rebase and squash commits"](https://docs.astropy.org/en/latest/development/when_to_rebase.html).
- [ ] Did the CI pass? If no, are the failures related? If you need to run daily and weekly cron jobs as part of the PR, please apply the `Extra CI` label.
- [ ] Is a change log needed? If yes, did the change log check pass? If no, add the `no-changelog-entry-needed` label. If this is a manual backport, use the `skip-changelog-checks` label unless special changelog handling is necessary.
- [ ] Is this a big PR that makes a "What's new?" entry worthwhile and if so, is (1) a "what's new" entry included in this PR and (2) the "whatsnew-needed" label applied?
- [ ] Is a milestone set? Milestone must be set but `astropy-bot` check might be missing; do not let the green checkmark fool you.
- [ ] At the time of adding the milestone, if the milestone set requires a backport to release branch(es), apply the appropriate `backport-X.Y.x` label(s) *before* merge.
----------
Hello @JefftheCloudDog :wave:! It looks like you've made some changes in your pull request, so I've checked the code again for style.
There are no PEP8 style issues with this pull request - thanks! :tada:
##### Comment last updated at 2022-07-09 20:53:40 UTC
👋 Thank you for your draft pull request! Do you know that you can use `[ci skip]` or `[skip ci]` in your commit messages to skip running continuous integration tests until you are ready?
@JefftheCloudDog the issue appears to be that the HTML table writer doesn't work with units.
Here's how I found the relevant documentation, which includes all the various things the HTML writer can do:
```python
from astropy.cosmology import Planck18
tbl = Planck18.to_format("astropy.table")
tbl.writer.help("html")
```
While mildly annoying, there is an easy fix for the unit issue: every Parameter on a Cosmology stores the preferred unit, so you can convert each column to its unitless equivalent. This is a quick and dirty method, there's probably something cleaner.
Using Planck18 as an example:
```python
import astropy.units as u
from astropy.cosmology import Planck18, Parameter
cosmo_cls = type(Planck18)
tbl = Planck18.to_format("astropy.table")
for n, col in zip(tbl.colnames, tbl.itercols()): # Iter thru name and column
param = getattr(cosmo_cls, n) # Get parameter, and check it
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Replace column with unitless version
tbl.replace_column(n, col.to_value(param.unit), copy=False)
tbl
```
@JefftheCloudDog the CI isn't fully running because the Code Style Check runs first and it's currently failing. There's usually an easy fix and that's to run ``tox -e codestyle`` from your terminal in the astropy folder. It can clean up most errors by itself. You might need to install tox.
```
conda activate [your env name]
conda install tox
cd ~/path/to/astropy
tox -e codestyle
```
I am a little stuck on the fix. From my testing, it appears that metadata is not stored properly as part of a cosmology object's metadata.
There are two fail cases:
1. read cosmology with metadata in table = cosmology object with metadata in columns with no metadata set
2. read cosmology without metadata in table = cosmology object with no metadata
Perhaps I am reading the documentation wrong, but is there a built-in method for HTML that automatically converts an HTML metadata column to metadata? I was not able to find documentation about any such methods.
@JefftheCloudDog, a good observation. I just did some testing and it doesn't look like the table -> HTML writer keeps any of a table's metadata! Fixing this is out of the scope of this PR, so let's just make the Cosmology <-> HTML work as well as possible within these restrictions. Don't worry about a Cosmology's metadata. I think one thing to be careful about is the ``cosmology_in_meta`` argument of ``to_table``.
@nstarman I set the cosmology_in_meta to be False for now because of the metadata issues. The tests should all pass now, I commented out the offending assert statements.
@JefftheCloudDog, it looks like the CI didn't start. Can you rebase and re-push? That should get it going. Thanks!
@nstarman I rebased and repushed. Looks like there are still some errors listed, like `CI / Python 3.9 with all optional dependencies (pull_request)` and `Check PR change log / Check if towncrier change log entry is correct (pull_request_target)`
@JefftheCloudDog If you click on the former it will take you to the full traceback, which is very useful for diagnosing problems. The changelog error is because this PR does not yet have a changelog. See https://docs.astropy.org/en/latest/development/workflow/development_workflow.html#add-a-changelog-entry for details.
To get an easier diagnosis, try in the command line
```bash
>>> pytest -P cosmology -v -x --pdb
```
I updated __init__.py to include html as an import, looks like I am getting this circular import error regarding this line in __init__.py:
`from . import cosmology, ecsv, html, mapping, model, row, table, yaml # noqa: F401, F403`
and html.py:
`from astropy.cosmology import Parameter`
Seems like the error could be popping up because of the order of importing Parameter from astropy.cosmology, but rearranging the import order does not seem to fix this issue.
@JefftheCloudDog, please rebase on the main branch instead of merging.
@nstarman My mistake. I tried reverting back the previous commits and rebasing on main, but it looks like the feature branch is already up to date.
@nstarman I am a little new to rebasing, but I think I got it mostly figured out.
Locally, I tried reverting back to before the merge and rebasing with the main branch from there. I did modify a few more files regarding testing, such as `astropy\cosmology\io\tests\test_.py` to expect `ascii.html` so pytest takes in account of the new registered HTML table reader/writer.
I also want to note that some files were not added during the rebase, such as `docs/changes/coordinates/13162.api.rst`, which main does have. I am going to look into this. Such missing files are causing certain fails in the checks.
I also think there are rather a lot of commits at this point, and in the end, squashing might be required.
@nstarman Ok, I think I got the hang of rebasing on my end. I did end up deleting some commits on my end during the rebase, mostly the merges.
I am not sure if it would be best to connect the new cosmology HTML to reader/writer to `astropy\astropy\cosmology\tests\test_connect.py`, since some of the tests look for metadata, and as we discussed above, there exist issues with metadata outside of the scope of this pull request. There might be other issues relating to metadata, but I am still reading through pytest's output.
E.g. in test_connect.py line 82:
`assert got.meta == cosmo.meta` is false, since the metadata would be missing entirely in the read in cosmology object.
@JefftheCloudDog thanks for rebasing and cleaning up some of the commits.
R.e. connecting to ``ReadWriteTestMixin``. This is how we test that the HTML writer works with each Cosmology subclass (e.g. w0waCDM), so it's quite important. As we agreed, saving and reading the metadata is out of scope, so instead we can just adjust the tests.
I suggest adding a boolean indicating whether the metadata round-trips.
```python
readwrite_formats = {("ascii.ecsv", True), ("ascii.html", False), ("json", True)}
```
Then the pytest parameter and test become
```python
@pytest.mark.parametrize("format, metaio", readwrite_formats)
def test_readwrite_complete_info(self, cosmo, tmpdir, format, metaio):
....
if metaio:
assert dict(got.meta) == dict(cosmo.meta)
```
@nstarman A number of the failures listed in the CI check show BeautifulSoup import errors.
` astropy.io.ascii.core.OptionalTableImportError: BeautifulSoup must be installed to read HTML tables` suggests that BeautifulSoup is not installed; when I run those tests locally, they do not fail with any BeautifulSoup errors. I am not quite sure how to fix this import issue, do you have any pointers?
E.g
```self = <astropy.io.ascii.html.HTMLInputter object at 0x7fcdb850db40>
lines = ['<html>', ' <head>', ' <meta charset="utf-8"/>', ' <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>', ' </head>', ' <body>', ...]
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
> from bs4 import BeautifulSoup
E ModuleNotFoundError: No module named 'bs4'
../../.tox/py310-test-cov/lib/python3.10/site-packages/astropy/io/ascii/html.py:81: ModuleNotFoundError```
@JefftheCloudDog, the issue is that the HTML I/O requires a third party library that isn't always included in the CI. We can just hack on another boolean in the CI. See imperative skipping in https://docs.pytest.org/en/7.1.x/how-to/skipping.html#skipping-test-functions.
```python
from astropy.utils.compat.optional_deps import HAS_BS4
```
```python
readwrite_formats = {("ascii.ecsv", True, False), ("ascii.html", False, HAS_BS4), ("json", True, False)}
```
In the tests,
```python
def test_readwrite_complete_info(self, cosmo, tmpdir, format, metaio, has_deps):
if not has_deps:
pytest.skip("missing a dependency")
```
@nstarman Looks like all but one check is passing; the remaining failure, I suppose, is just writing the proper documentation.
@JefftheCloudDog , please try to rebase against `upstream/main` and see if the RTD error goes way. Thanks!
Looks like all checks pass now! I suppose what's needed now is to rewrite io.rst's description of Cosmology's io, clean up any messy code if needed, and squash.
I added a test for the column name conversion. The test covers both cases. The first loop tests the write conversion to an alternative name and the second loop tests the conversion back to the original name per the lookup table.
"Correcting the Table check is important since `cls` allows for any Table subclass."
Do you mean adding an early exit/warning if cls is not a QTable?
> "Correcting the Table check is important since `cls` allows for any Table subclass."
>
> Do you mean adding an early exit/warning if cls is not a QTable?
See https://github.com/astropy/astropy/pull/13075#discussion_r857669518.
Understood. Also, by adding HTML that renders well with MathJax, would the following conversion make sense:
`"H0": "$$\\H0$$"`, i.e. converting H0 to $$\H0$$ in HTML. I referenced the MathJax for HTML formatting [demos ](https://github.com/mathjax/MathJax-demos-web).
Or should the value be something else for testing purposes?
> Or should the value be something else for testing purposes?
We definitely want to provide to the user the correct values, so those will have to be tested.
> would the following conversion make sense: "H0": "$$\\H0$$"
Give it a try 👍 . Try saving some cosmologies from the different classes and opening the saved HTML file in your browser.
If it looks good, then this PR will be getting close!

Here is an example with MathJax. I had to manually add certain script tags, such as:
```
<script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<script type="text/javascript" id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
</script>
```
which was not added automatically from the HTML output. Some do render well with MathJax, such as `m_nu`, but others would need a more suitable format. Is this going in the right direction, or do we have to have a way to add correct script tags when writing to ascii.html?
@nstarman The code should be updated. I added extra `\` for some formatting errors. I amended the last commit for a minor fix. I think some squashing would be somewhat appropriate since the commit log is getting rather long.
There seems to be an issue with test_readwrite_html_mutlirow.
`TypeError: only dimensionless scalar quantities can be converted to Python scalars`
I'm not quite sure what this means. The other new tests work fine, and I commented out meta-related assertions since HTML meta is not implemented yet.
I am also still working on the TestReadWriteHTML tests and adding an entry to the rst. Is there an example of analogous tests for TestReadWriteHTML? Looks like there are no such tests for other test files in cosmology/io/tests.
> There seems to be an issue with test_readwrite_html_mutlirow. `TypeError: only dimensionless scalar quantities can be converted to Python scalars` I'm not quite sure what this means. The other new tests work fine, and I commented out meta-related assertions since HTML meta is not implemented yet.
This error occurs when a quantity with units is converted to a python scalar. On your machine try ``float(10 * u.km)`` and it should raise this error.
Looking at the log, it appears that some units aren't being stripped. For instance, I looked and saw error messages like this:
``astropy.units.core.UnitConversionError: 'eV' (energy/torque/work) and '' (dimensionless) are not convertible``
The source of this problem is that HTML tables don't support units. Recall in the PR there is
```
# Replace column with unitless version
table.replace_column(n, (col << param.unit).value, copy=False)
```
to strip units. This is applied when calling ``cosmo.write(...)``, because it's part of the Cosmology I/O, but in ``test_readwrite_html_mutlirow`` a multi row table is made and Table's I/O is used.
The solution is to for-loop over the combined table and remove units, like is done above.
If you are interested in a followup PR, you could add a flag to ``to_table`` like ``remove_units`` so that ``cosmo.to_format("astropy.table", remove_units=True)`` makes a table that can be written to HTML.
e.g.
```
tbl = cosmo.to_format("astropy.table", remove_units=True)
tbl.write("ascii.html")
```
In the meantime, just remove the units in ``test_readwrite_html_mutlirow``.
> I am also still working on the TestReadWriteHTML tests and adding an entry to the rst. Is there an example of analogous tests for `TestReadWriteHTML`? Looks like there are no such tests for other test files in cosmology/io/tests.
The ECSV I/O is a good example of read/write tests
(https://github.com/astropy/astropy/blob/main/astropy/cosmology/io/tests/test_ecsv.py).
Also when writing tests, take a look at the code coverage. That's the best way to see what needs a test!
<img width="848" alt="Screen Shot 2022-06-21 at 11 40 44" src="https://user-images.githubusercontent.com/8949649/174841071-dc8e971d-5718-4a10-b8ce-9ce3d8c4c7cd.png">
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in astropy/cosmology/io/html.py]
(definition of read_html_table:)
def read_html_table(filename, index=None, *, move_to_meta=False, cosmology=None, latex_names=True, **kwargs):
"""Read a |Cosmology| from an HTML file.
Parameters
----------
filename : path-like or file-like
From where to read the Cosmology.
index : int or str or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index`` is a
string, the "name" column is used as the indexing column.
move_to_meta : bool, optional keyword-only
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict (e.g. for
``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta`` will be
``{'key': 10}``).
cosmology : str or |Cosmology| class or None, optional keyword-only
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter
values, filling in any non-mandatory arguments missing in 'table'.
latex_names : bool, optional keyword-only
Whether the |Table| (might) have latex column names for the parameters
that need to be mapped to the correct parameter name -- e.g. $$H_{0}$$
to 'H0'. This is `True` by default, but can be turned off (set to
`False`) if there is a known name conflict (e.g. both an 'H0' and
'$$H_{0}$$' column) as this will raise an error. In this case, the
correct name ('H0') is preferred.
**kwargs : Any
Passed to :attr:`astropy.table.QTable.read`. ``format`` is set to
'ascii.html', regardless of input.
Returns
-------
|Cosmology| subclass instance
Raises
------
ValueError
If the keyword argument 'format' is given and is not "ascii.html"."""
(definition of write_html_table:)
def write_html_table(cosmology, file, *, overwrite=False, cls=QTable, latex_names=False, **kwargs):
"""Serialize the |Cosmology| into a HTML table.
Parameters
----------
cosmology : |Cosmology| subclass instance
Location to save the serialized cosmology.
file : path-like or file-like
Where to write the html table.
overwrite : bool, optional keyword-only
Whether to overwrite the file, if it exists.
cls : |Table| class, optional keyword-only
Astropy |Table| (sub)class to use when writing. Default is |QTable|
class.
latex_names : bool, optional keyword-only
Whether to format the parameters (column) names to latex -- e.g. 'H0' to
$$H_{0}$$.
**kwargs : Any
Passed to ``cls.write``.
Raises
------
TypeError
If the optional keyword-argument 'cls' is not a subclass of |Table|.
ValueError
If the keyword argument 'format' is given and is not "ascii.html".
Notes
-----
A HTML file containing a Cosmology HTML table should have scripts enabling
MathJax.
::
<script
src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<script type="text/javascript" id="MathJax-script" async
src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js">
</script>"""
(definition of html_identify:)
def html_identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify if an object uses the HTML Table format.
Parameters
----------
origin : Any
Not used.
filepath : str or Any
From where to read the Cosmology.
fileobj : Any
Not used.
*args : Any
Not used.
**kwargs : Any
Not used.
Returns
-------
bool
If the filepath is a string ending with '.html'."""
[end of new definitions in astropy/cosmology/io/html.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Register format ``html`` to ``Cosmology.write`` with nice mathjax
Cosmology can now read and write to files.
It would be nice to register with ``Cosmology.write`` a method for exporting a Cosmology to a HTML table.
There are good examples of IO with Cosmology at https://github.com/astropy/astropy/tree/main/astropy/cosmology/io
and documentation at https://docs.astropy.org/en/latest/cosmology/io.html#cosmology-io
I'm thinking the ``write_html(...)`` method would call ``cosmology.io.table.to_table()``, format the table to nice MathJax or something and then call the `QTable.write(..., format='html')`.
Edit: also, the mathjax representation of each parameter can be stored on the corresponding Parameter object, like how units have the ``format`` argument in [def_unit](https://docs.astropy.org/en/stable/api/astropy.units.def_unit.html#astropy.units.def_unit).
----------
Hi. I am a new contributor and was wondering if this was still open for contribution? I would like to look into this if possible.
Hello! The issue is still open, so feel free. 😸
@JefftheCloudDog that would be great! No one else is currently working on this feature request. If you need any help or have any questions I am happy to help. You can post here, or in the Astropy Slack cosmology channel. We also have documentation to assist in contributing at https://www.astropy.org/contribute.html#contribute-code-or-docs.
From my understanding of the request description, the high-level steps should look as such:
1. get a QTable object from the `cosmology.io.table.to_table()` function, which returns a QTable
2. format to MathJax
3. call `QTable.write()` to write
4. The registration should look like this: `readwrite_registry.register_writer("ascii.html", Cosmology, write_table)`
From the steps and observing some examples from Cosmology/io, this `write_table()` should look very similar to `write_ecsv()` from Cosmology/io/ecsv.py
Am I correct in understanding so far?
@JefftheCloudDog, correct! Looks like a great plan for implementation.
In #12983 we are working on the backend which should make the column naming easier, so each Parameter can hold its mathjax representation.
In the meantime it might be easiest to just have a `dict` of parameter name -> mathjax name.
Ah, I see. The format input is just a dict that has mathjax (or some other type) representation as values which should be an optional parameter.
I'm looking through the example of def_unit, and looks like a new type of unit is defined with the format dict.
Should `write_table()` function the same way? Are we creating a new Cosmology or QTable object for formatting?
I suppose we are essentially using [`Table.write()`](https://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.write) since a QTable object is mostly identical to a Table object.
When https://github.com/astropy/astropy/pull/12983 is merged then each parameter will hold its mathjax representation.
e.g. for latex.
```python
class FLRW(Cosmology):
H0 = Parameter(..., format={"latex": r"$H_0$"})
```
So then the columns of the ``FLRW`` -> ``QTable`` can be renamed like (note this is a quick and dirty implementation)
```python
tbl = to_table(cosmo, ...)
for name in cosmo.__parameters__:
param = getattr(cosmo.__class__, name)
new_name = param.get_format_name('latex')
tbl.rename_column(name, new_name)
```
However, https://github.com/astropy/astropy/pull/12983 is not yet merged, so the whole mathjax format can just be one central dictionary:
```python
mathjax_formats = dict(H0=..., Ode0=...)
```
Making it
```python
tbl = to_table(cosmo, ...)
for name in cosmo.__parameters__:
new_name = mathjax_formats.get(name, name) # fallback if not in formats
tbl.rename_column(name, new_name)
```
Anyway, that's just what I was suggesting as a workaround until https://github.com/astropy/astropy/pull/12983 is in.
Ok, I see. Since this deals with i/o, the new code should go to astropy\cosmology\table.py?
I see that there is already a line for `convert_registry.register_writer("astropy.table", Cosmology, to_table)`, so I was not sure if there should be a different file to register the new method.
> I see that there is already a line for convert_registry.register_writer("astropy.table", Cosmology, to_table), so I was not sure if there should be a different file to register the new method.
Yes, this should probably have a new file ``astropy/cosmology/io/html.py``.
I am writing tests now and it looks like writing fails with the following errors. I am not quite sure why these errors are appearing. I have been trying to understand why the error is occurring, since ascii.html is a built-in HTML table writer, but I am struggling a little. Can someone provide some support?
I based the first test on cosmology\io\tests\test_ecsv.py. Seems like the test is just failing on write.
```
fp = tmp_path / "test_to_html_table_bad_index.html"
write(file=fp)
```
error:
```
self = <astropy.cosmology.io.tests.test_html.TestReadWriteHTML object at 0x00000175CE162F70>, read = <function ReadWriteDirectTestBase.read.<locals>.use_read at 0x00000175CE2F3280>
write = <function ReadWriteDirectTestBase.write.<locals>.use_write at 0x00000175CE4B9A60>, tmp_path = WindowsPath('C:/Users/jeffr/AppData/Local/Temp/pytest-of-jeffr/pytest-34/test_to_html_table_bad_index_c7')
def test_to_html_table_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_html_table_bad_index.html"
> write(file=fp, format="ascii.html")
astropy\cosmology\io\tests\test_html.py:30:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
astropy\cosmology\io\tests\base.py:196: in use_write
return self.functions["write"](cosmo, *args, **kwargs)
astropy\cosmology\io\html.py:86: in write_table
table.write(file, overwrite=overwrite, **kwargs)
astropy\table\connect.py:129: in __call__
self.registry.write(instance, *args, **kwargs)
astropy\io\registry\core.py:354: in write
return writer(data, *args, **kwargs)
astropy\io\ascii\connect.py:26: in io_write
return write(table, filename, **kwargs)
astropy\io\ascii\ui.py:840: in write
lines = writer.write(table)
astropy\io\ascii\html.py:431: in write
new_col = Column([el[i] for el in col])
astropy\table\column.py:1076: in __new__
self = super().__new__(
astropy\table\column.py:434: in __new__
self_data = np.array(data, dtype=dtype, copy=copy)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <Quantity 0. eV>
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
> raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
E TypeError: only dimensionless scalar quantities can be converted to Python scalars
astropy\units\quantity.py:1250: TypeError
```
@JefftheCloudDog Thanks for dropping in the test output. The best way for me to help will be to see the code. To do that, it would be great if you opened a Pull Request with your code. Don't worry that the PR is not in it's final state, you can open it as Draft. Thanks!
See https://docs.astropy.org/en/latest/development/workflow/development_workflow.html if you are unsure how to make a Pull Request.
Thanks for the response! I created a [draft pull request ](https://github.com/astropy/astropy/pull/13075) for this issue. I did try to adhere to the instructions, but since this is my first contribution, there might be some mistakes. Please let me know if there are any issues.
--------------------
</issues> | 7cbba866a8c5749b90a5cb4f9877ddfad2d36037 |
Textualize__rich-2166 | 2,166 | Textualize/rich | null | 50a7ec897ed6f1f5022eac2218e3c693115ec420 | 2022-04-05T11:54:59Z | diff --git a/docs/source/progress.rst b/docs/source/progress.rst
index 59813a4738..8d4f81b2ad 100644
--- a/docs/source/progress.rst
+++ b/docs/source/progress.rst
@@ -91,7 +91,7 @@ Transient progress displays are useful if you want more minimal output in the te
Indeterminate progress
~~~~~~~~~~~~~~~~~~~~~~
-When you add a task it is automatically *started*, which means it will show a progress bar at 0% and the time remaining will be calculated from the current time. This may not work well if there is a long delay before you can start updating progress; you may need to wait for a response from a server or count files in a directory (for example). In these cases you can call :meth:`~rich.progress.Progress.add_task` with ``start=False`` which will display a pulsing animation that lets the user know something is working. This is know as an *indeterminate* progress bar. When you have the number of steps you can call :meth:`~rich.progress.Progress.start_task` which will display the progress bar at 0%, then :meth:`~rich.progress.Progress.update` as normal.
+When you add a task it is automatically *started*, which means it will show a progress bar at 0% and the time remaining will be calculated from the current time. This may not work well if there is a long delay before you can start updating progress; you may need to wait for a response from a server or count files in a directory (for example). In these cases you can call :meth:`~rich.progress.Progress.add_task` with ``start=False`` or ``total=None`` which will display a pulsing animation that lets the user know something is working. This is know as an *indeterminate* progress bar. When you have the number of steps you can call :meth:`~rich.progress.Progress.start_task` which will display the progress bar at 0%, then :meth:`~rich.progress.Progress.update` as normal.
Auto refresh
~~~~~~~~~~~~
diff --git a/rich/progress.py b/rich/progress.py
index a3b30a85bc..d7aee2664e 100644
--- a/rich/progress.py
+++ b/rich/progress.py
@@ -40,7 +40,7 @@
from typing_extensions import Literal # pragma: no cover
from . import filesize, get_console
-from .console import Console, JustifyMethod, RenderableType, Group
+from .console import Console, Group, JustifyMethod, RenderableType
from .highlighter import Highlighter
from .jupyter import JupyterMixin
from .live import Live
@@ -148,7 +148,7 @@ def track(
finished_style=finished_style,
pulse_style=pulse_style,
),
- TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TaskProgressColumn(),
TimeRemainingColumn(),
)
)
@@ -651,7 +651,7 @@ def __init__(
def render(self, task: "Task") -> ProgressBar:
"""Gets a progress bar widget for a task."""
return ProgressBar(
- total=max(0, task.total),
+ total=max(0, task.total) if task.total is not None else None,
completed=max(0, task.completed),
width=None if self.bar_width is None else max(1, self.bar_width),
pulse=not task.started,
@@ -675,6 +675,43 @@ def render(self, task: "Task") -> Text:
return Text(str(delta), style="progress.elapsed")
+class TaskProgressColumn(TextColumn):
+ """A column displaying the progress of a task."""
+
+ def __init__(
+ self,
+ text_format: str = "[progress.percentage]{task.percentage:>3.0f}%",
+ text_format_no_percentage: str = "",
+ style: StyleType = "none",
+ justify: JustifyMethod = "left",
+ markup: bool = True,
+ highlighter: Optional[Highlighter] = None,
+ table_column: Optional[Column] = None,
+ ) -> None:
+ self.text_format_no_percentage = text_format_no_percentage
+ super().__init__(
+ text_format=text_format,
+ style=style,
+ justify=justify,
+ markup=markup,
+ highlighter=highlighter,
+ table_column=table_column,
+ )
+
+ def render(self, task: "Task") -> Text:
+ text_format = (
+ self.text_format_no_percentage if task.total is None else self.text_format
+ )
+ _text = text_format.format(task=task)
+ if self.markup:
+ text = Text.from_markup(_text, style=self.style, justify=self.justify)
+ else:
+ text = Text(_text, style=self.style, justify=self.justify)
+ if self.highlighter:
+ self.highlighter.highlight(text)
+ return text
+
+
class TimeRemainingColumn(ProgressColumn):
"""Renders estimated time remaining.
@@ -705,6 +742,9 @@ def render(self, task: "Task") -> Text:
task_time = task.time_remaining
style = "progress.remaining"
+ if task.total is None:
+ return Text("", style=style)
+
if task_time is None:
return Text("--:--" if self.compact else "-:--:--", style=style)
@@ -734,7 +774,7 @@ class TotalFileSizeColumn(ProgressColumn):
def render(self, task: "Task") -> Text:
"""Show data completed."""
- data_size = filesize.decimal(int(task.total))
+ data_size = filesize.decimal(int(task.total)) if task.total is not None else ""
return Text(data_size, style="progress.filesize.total")
@@ -757,7 +797,7 @@ def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
def render(self, task: "Task") -> Text:
"""Show completed/total."""
completed = int(task.completed)
- total = int(task.total)
+ total = int(task.total) if task.total is not None else "?"
total_width = len(str(total))
return Text(
f"{completed:{total_width}d}{self.separator}{total}",
@@ -781,24 +821,34 @@ def __init__(
def render(self, task: "Task") -> Text:
"""Calculate common unit for completed and total."""
completed = int(task.completed)
- total = int(task.total)
+
+ unit_and_suffix_calculation_base = (
+ int(task.total) if task.total is not None else completed
+ )
if self.binary_units:
unit, suffix = filesize.pick_unit_and_suffix(
- total,
+ unit_and_suffix_calculation_base,
["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"],
1024,
)
else:
unit, suffix = filesize.pick_unit_and_suffix(
- total,
+ unit_and_suffix_calculation_base,
["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"],
1000,
)
- completed_ratio = completed / unit
- total_ratio = total / unit
precision = 0 if unit == 1 else 1
+
+ completed_ratio = completed / unit
completed_str = f"{completed_ratio:,.{precision}f}"
- total_str = f"{total_ratio:,.{precision}f}"
+
+ if task.total is not None:
+ total = int(task.total)
+ total_ratio = total / unit
+ total_str = f"{total_ratio:,.{precision}f}"
+ else:
+ total_str = "?"
+
download_status = f"{completed_str}/{total_str} {suffix}"
download_text = Text(download_status, style="progress.download")
return download_text
@@ -839,8 +889,8 @@ class Task:
description: str
"""str: Description of the task."""
- total: float
- """str: Total number of steps in this task."""
+ total: Optional[float]
+ """Optional[float]: Total number of steps in this task."""
completed: float
"""float: Number of steps completed"""
@@ -883,8 +933,10 @@ def started(self) -> bool:
return self.start_time is not None
@property
- def remaining(self) -> float:
- """float: Get the number of steps remaining."""
+ def remaining(self) -> Optional[float]:
+ """Optional[float]: Get the number of steps remaining, if a non-None total was set."""
+ if self.total is None:
+ return None
return self.total - self.completed
@property
@@ -903,7 +955,7 @@ def finished(self) -> bool:
@property
def percentage(self) -> float:
- """float: Get progress of task as a percentage."""
+ """float: Get progress of task as a percentage. If a None total was set, returns 0"""
if not self.total:
return 0.0
completed = (self.completed / self.total) * 100.0
@@ -936,7 +988,10 @@ def time_remaining(self) -> Optional[float]:
speed = self.speed
if not speed:
return None
- estimate = ceil(self.remaining / speed)
+ remaining = self.remaining
+ if remaining is None:
+ return None
+ estimate = ceil(remaining / speed)
return estimate
def _reset(self) -> None:
@@ -1027,7 +1082,7 @@ def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
return (
TextColumn("[progress.description]{task.description}"),
BarColumn(),
- TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TaskProgressColumn(),
TimeRemainingColumn(),
)
@@ -1358,7 +1413,11 @@ def update(
popleft()
if update_completed > 0:
_progress.append(ProgressSample(current_time, update_completed))
- if task.completed >= task.total and task.finished_time is None:
+ if (
+ task.total is not None
+ and task.completed >= task.total
+ and task.finished_time is None
+ ):
task.finished_time = task.elapsed
if refresh:
@@ -1423,7 +1482,11 @@ def advance(self, task_id: TaskID, advance: float = 1) -> None:
while len(_progress) > 1000:
popleft()
_progress.append(ProgressSample(current_time, update_completed))
- if task.completed >= task.total and task.finished_time is None:
+ if (
+ task.total is not None
+ and task.completed >= task.total
+ and task.finished_time is None
+ ):
task.finished_time = task.elapsed
task.finished_speed = task.speed
@@ -1484,7 +1547,7 @@ def add_task(
self,
description: str,
start: bool = True,
- total: float = 100.0,
+ total: Optional[float] = 100.0,
completed: int = 0,
visible: bool = True,
**fields: Any,
@@ -1495,7 +1558,8 @@ def add_task(
description (str): A description of the task.
start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
you will need to call `start` manually. Defaults to True.
- total (float, optional): Number of total steps in the progress if know. Defaults to 100.
+ total (float, optional): Number of total steps in the progress if known.
+ Set to None to render a pulsing animation. Defaults to 100.
completed (int, optional): Number of steps completed so far.. Defaults to 0.
visible (bool, optional): Enable display of the task. Defaults to True.
**fields (str): Additional data fields required for rendering.
@@ -1585,12 +1649,12 @@ def remove_task(self, task_id: TaskID) -> None:
*Progress.get_default_columns(),
TimeElapsedColumn(),
console=console,
- transient=True,
+ transient=False,
) as progress:
task1 = progress.add_task("[red]Downloading", total=1000)
task2 = progress.add_task("[green]Processing", total=1000)
- task3 = progress.add_task("[yellow]Thinking", total=1000, start=False)
+ task3 = progress.add_task("[yellow]Thinking", total=None)
while not progress.finished:
progress.update(task1, advance=0.5)
diff --git a/rich/progress_bar.py b/rich/progress_bar.py
index 1797b5f786..9c3a4f25a2 100644
--- a/rich/progress_bar.py
+++ b/rich/progress_bar.py
@@ -19,10 +19,10 @@ class ProgressBar(JupyterMixin):
"""Renders a (progress) bar. Used by rich.progress.
Args:
- total (float, optional): Number of steps in the bar. Defaults to 100.
+ total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
completed (float, optional): Number of steps completed. Defaults to 0.
width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
- pulse (bool, optional): Enable pulse effect. Defaults to False.
+ pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
@@ -32,7 +32,7 @@ class ProgressBar(JupyterMixin):
def __init__(
self,
- total: float = 100.0,
+ total: Optional[float] = 100.0,
completed: float = 0,
width: Optional[int] = None,
pulse: bool = False,
@@ -58,8 +58,10 @@ def __repr__(self) -> str:
return f"<Bar {self.completed!r} of {self.total!r}>"
@property
- def percentage_completed(self) -> float:
+ def percentage_completed(self) -> Optional[float]:
"""Calculate percentage complete."""
+ if self.total is None:
+ return None
completed = (self.completed / self.total) * 100.0
completed = min(100, max(0.0, completed))
return completed
@@ -157,23 +159,29 @@ def __rich_console__(
width = min(self.width or options.max_width, options.max_width)
ascii = options.legacy_windows or options.ascii_only
- if self.pulse:
+ should_pulse = self.pulse or self.total is None
+ if should_pulse:
yield from self._render_pulse(console, width, ascii=ascii)
return
- completed = min(self.total, max(0, self.completed))
+ completed: Optional[float] = (
+ min(self.total, max(0, self.completed)) if self.total is not None else None
+ )
bar = "-" if ascii else "━"
half_bar_right = " " if ascii else "╸"
half_bar_left = " " if ascii else "╺"
complete_halves = (
- int(width * 2 * completed / self.total) if self.total else width * 2
+ int(width * 2 * completed / self.total)
+ if self.total and completed is not None
+ else width * 2
)
bar_count = complete_halves // 2
half_bar_count = complete_halves % 2
style = console.get_style(self.style)
+ is_finished = self.total is None or self.completed >= self.total
complete_style = console.get_style(
- self.complete_style if self.completed < self.total else self.finished_style
+ self.finished_style if is_finished else self.complete_style
)
_Segment = Segment
if bar_count:
| diff --git a/tests/test_progress.py b/tests/test_progress.py
index d3c6171c9c..61e11a9cd8 100644
--- a/tests/test_progress.py
+++ b/tests/test_progress.py
@@ -3,33 +3,33 @@
import io
import os
import tempfile
-from time import sleep
from types import SimpleNamespace
import pytest
import rich.progress
-from rich.progress_bar import ProgressBar
from rich.console import Console
from rich.highlighter import NullHighlighter
from rich.progress import (
BarColumn,
- FileSizeColumn,
- TotalFileSizeColumn,
DownloadColumn,
- TransferSpeedColumn,
- RenderableColumn,
- SpinnerColumn,
+ FileSizeColumn,
MofNCompleteColumn,
Progress,
+ RenderableColumn,
+ SpinnerColumn,
Task,
+ TaskID,
+ TaskProgressColumn,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
- track,
+ TotalFileSizeColumn,
+ TransferSpeedColumn,
_TrackThread,
- TaskID,
+ track,
)
+from rich.progress_bar import ProgressBar
from rich.text import Text
@@ -104,7 +104,7 @@ class FakeTask(Task):
],
)
def test_compact_time_remaining_column(task_time, formatted):
- task = SimpleNamespace(finished=False, time_remaining=task_time)
+ task = SimpleNamespace(finished=False, time_remaining=task_time, total=100)
column = TimeRemainingColumn(compact=True)
assert str(column.render(task)) == formatted
@@ -114,7 +114,7 @@ def test_time_remaining_column_elapsed_when_finished():
task_time = 71
formatted = "0:01:11"
- task = SimpleNamespace(finished=True, finished_time=task_time)
+ task = SimpleNamespace(finished=True, finished_time=task_time, total=100)
column = TimeRemainingColumn(elapsed_when_finished=True)
assert str(column.render(task)) == formatted
@@ -247,6 +247,31 @@ def test_expand_bar() -> None:
assert render_result == expected
+def test_progress_with_none_total_renders_a_pulsing_bar() -> None:
+ console = Console(
+ file=io.StringIO(),
+ force_terminal=True,
+ width=10,
+ color_system="truecolor",
+ legacy_windows=False,
+ _environ={},
+ )
+ progress = Progress(
+ BarColumn(bar_width=None),
+ console=console,
+ get_time=lambda: 1.0,
+ auto_refresh=False,
+ )
+ progress.add_task("foo", total=None)
+ with progress:
+ pass
+ expected = "\x1b[?25l\x1b[38;2;153;48;86m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;249;38;114m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\r\x1b[2K\x1b[38;2;153;48;86m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;249;38;114m━\x1b[0m\x1b[38;2;244;38;112m━\x1b[0m\x1b[38;2;230;39;108m━\x1b[0m\x1b[38;2;209;42;102m━\x1b[0m\x1b[38;2;183;44;94m━\x1b[0m\n\x1b[?25h"
+ render_result = console.file.getvalue()
+ print("RESULT\n", repr(render_result))
+ print("EXPECTED\n", repr(expected))
+ assert render_result == expected
+
+
def test_render() -> None:
expected = "\x1b[?25lfoo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 0%\x1b[0m \x1b[36m-:--:--\x1b[0m\nbar \x1b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━━\x1b[0m\x1b[38;5;237m╺\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 53%\x1b[0m \x1b[36m-:--:--\x1b[0m\nfoo2 \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[36m0:00:00\x1b[0m\r\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2Kfoo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 0%\x1b[0m \x1b[36m-:--:--\x1b[0m\nbar \x1b[38;2;249;38;114m━━━━━━━━━━━━━━━━━━━━━\x1b[0m\x1b[38;5;237m╺\x1b[0m\x1b[38;5;237m━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m 53%\x1b[0m \x1b[36m-:--:--\x1b[0m\nfoo2 \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[35m100%\x1b[0m \x1b[36m0:00:00\x1b[0m\n\x1b[?25h"
render_result = render_progress()
@@ -374,7 +399,7 @@ def test_using_default_columns() -> None:
expected_default_types = [
TextColumn,
BarColumn,
- TextColumn,
+ TaskProgressColumn,
TimeRemainingColumn,
]
| diff --git a/docs/source/progress.rst b/docs/source/progress.rst
index 59813a4738..8d4f81b2ad 100644
--- a/docs/source/progress.rst
+++ b/docs/source/progress.rst
@@ -91,7 +91,7 @@ Transient progress displays are useful if you want more minimal output in the te
Indeterminate progress
~~~~~~~~~~~~~~~~~~~~~~
-When you add a task it is automatically *started*, which means it will show a progress bar at 0% and the time remaining will be calculated from the current time. This may not work well if there is a long delay before you can start updating progress; you may need to wait for a response from a server or count files in a directory (for example). In these cases you can call :meth:`~rich.progress.Progress.add_task` with ``start=False`` which will display a pulsing animation that lets the user know something is working. This is know as an *indeterminate* progress bar. When you have the number of steps you can call :meth:`~rich.progress.Progress.start_task` which will display the progress bar at 0%, then :meth:`~rich.progress.Progress.update` as normal.
+When you add a task it is automatically *started*, which means it will show a progress bar at 0% and the time remaining will be calculated from the current time. This may not work well if there is a long delay before you can start updating progress; you may need to wait for a response from a server or count files in a directory (for example). In these cases you can call :meth:`~rich.progress.Progress.add_task` with ``start=False`` or ``total=None`` which will display a pulsing animation that lets the user know something is working. This is know as an *indeterminate* progress bar. When you have the number of steps you can call :meth:`~rich.progress.Progress.start_task` which will display the progress bar at 0%, then :meth:`~rich.progress.Progress.update` as normal.
Auto refresh
~~~~~~~~~~~~
| [
{
"components": [
{
"doc": "A column displaying the progress of a task.",
"lines": [
678,
712
],
"name": "TaskProgressColumn",
"signature": "class TaskProgressColumn(TextColumn):",
"type": "class"
},
{
"doc": "",
... | [
"tests/test_progress.py::test_bar_columns",
"tests/test_progress.py::test_text_column",
"tests/test_progress.py::test_time_elapsed_column",
"tests/test_progress.py::test_time_remaining_column",
"tests/test_progress.py::test_compact_time_remaining_column[None---:--]",
"tests/test_progress.py::test_compact_... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[progress] The `total` we pass to a Progress renderable can now be `None`
## Type of changes
- [ ] Bug fix
- [x] New feature: https://github.com/Textualize/rich/issues/1054
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [x] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
With this PR we now render a "pulse" progress bar when the given `total` is None.
Which is a new possibility we now have: this parameter used to be a `float`, and is now an `Optional[float]` - so it shouldn't bring any compatibility issues, as the type is just widened to a value that was not accepted according to the documentation, and which would raise exceptions before if it was used?
#### Design considerations
To carry the semantic of _"we don't know the 'total' value for my progress bar"_, instead of `None` we could have used other kinds of values, such as:
- A negative value, like `total=-1`. The benefit would have been that the signature would still have been `float` , but the drawback is that the meaning of this value is less clear when debugging
- A sentinel value, such as `total=rich.progress.UNDETERMINED` : probably the most explicit option, but a bit more cumbersome for our users than a good old `None`? :thinking:
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/progress.py]
(definition of TaskProgressColumn:)
class TaskProgressColumn(TextColumn):
"""A column displaying the progress of a task."""
(definition of TaskProgressColumn.__init__:)
def __init__( self, text_format: str = "[progress.percentage]{task.percentage:>3.0f}%", text_format_no_percentage: str = "", style: StyleType = "none", justify: JustifyMethod = "left", markup: bool = True, highlighter: Optional[Highlighter] = None, table_column: Optional[Column] = None, ) -> None:
(definition of TaskProgressColumn.render:)
def render(self, task: "Task") -> Text:
[end of new definitions in rich/progress.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | |
conan-io__conan-10972 | 10,972 | conan-io/conan | null | b295f339b25a7306d0cd51b483c2a486bb57de40 | 2022-04-05T11:19:29Z | diff --git a/conan/tools/microsoft/msbuilddeps.py b/conan/tools/microsoft/msbuilddeps.py
index eb27b71c8b7..76a438e986f 100644
--- a/conan/tools/microsoft/msbuilddeps.py
+++ b/conan/tools/microsoft/msbuilddeps.py
@@ -184,8 +184,7 @@ def join_paths(paths):
'system_libs': "".join([add_valid_ext(sys_dep) for sys_dep in cpp_info.system_libs]),
'definitions': "".join("%s;" % d for d in cpp_info.defines),
'compiler_flags': " ".join(cpp_info.cxxflags + cpp_info.cflags),
- 'linker_flags': " ".join(cpp_info.sharedlinkflags),
- 'exe_flags': " ".join(cpp_info.exelinkflags),
+ 'linker_flags': " ".join(cpp_info.sharedlinkflags + cpp_info.exelinkflags),
'dependencies': ";".join(deps) if not build else "",
'host_context': not build
}
diff --git a/conan/tools/microsoft/toolchain.py b/conan/tools/microsoft/toolchain.py
index 128797a61b7..dc1e3fc27ff 100644
--- a/conan/tools/microsoft/toolchain.py
+++ b/conan/tools/microsoft/toolchain.py
@@ -2,6 +2,8 @@
import textwrap
from xml.dom import minidom
+from jinja2 import Template
+
from conan.tools._check_build_profile import check_using_build_profile
from conan.tools.build import build_jobs
from conan.tools.intel.intel_cc import IntelCC
@@ -14,10 +16,37 @@ class MSBuildToolchain(object):
filename = "conantoolchain.props"
+ _config_toolchain_props = textwrap.dedent("""\
+ <?xml version="1.0" encoding="utf-8"?>
+ <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <PreprocessorDefinitions>{{ defines }}%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>{{ compiler_flags }} %(AdditionalOptions)</AdditionalOptions>
+ <RuntimeLibrary>{{ runtime_library }}</RuntimeLibrary>
+ <LanguageStandard>{{ cppstd }}</LanguageStandard>{{ parallel }}{{ compile_options }}
+ </ClCompile>
+ <Link>
+ <AdditionalOptions>{{ linker_flags }} %(AdditionalOptions)</AdditionalOptions>
+ </Link>
+ <ResourceCompile>
+ <PreprocessorDefinitions>{{ defines }}%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>{{ compiler_flags }} %(AdditionalOptions)</AdditionalOptions>
+ </ResourceCompile>
+ </ItemDefinitionGroup>
+ <PropertyGroup Label="Configuration">
+ <PlatformToolset>{{ toolset }}</PlatformToolset>
+ </PropertyGroup>
+ </Project>
+ """)
+
def __init__(self, conanfile):
self._conanfile = conanfile
self.preprocessor_definitions = {}
self.compile_options = {}
+ self.cxxflags = []
+ self.cflags = []
+ self.ldflags = []
self.configuration = conanfile.settings.build_type
self.runtime_library = self._runtime_library(conanfile.settings)
self.cppstd = conanfile.settings.get_safe("compiler.cppstd")
@@ -37,6 +66,7 @@ def _name_condition(self, settings):
def generate(self):
name, condition = self._name_condition(self._conanfile.settings)
config_filename = "conantoolchain{}.props".format(name)
+ # Writing the props files
self._write_config_toolchain(config_filename)
self._write_main_toolchain(config_filename, condition)
if self._conanfile.settings.get_safe("compiler") == "intel-cc":
@@ -94,35 +124,19 @@ def _runtime_library(settings):
"MDd": "MultiThreadedDebugDLL"}.get(runtime, "")
return runtime_library
- def _write_config_toolchain(self, config_filename):
+ @property
+ def context_config_toolchain(self):
def format_macro(key, value):
return '%s=%s' % (key, value) if value is not None else key
- toolchain_file = textwrap.dedent("""\
- <?xml version="1.0" encoding="utf-8"?>
- <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <ItemDefinitionGroup>
- <ClCompile>
- <PreprocessorDefinitions>
- {};%(PreprocessorDefinitions)
- </PreprocessorDefinitions>
- <RuntimeLibrary>{}</RuntimeLibrary>
- <LanguageStandard>{}</LanguageStandard>{}{}
- </ClCompile>
- <ResourceCompile>
- <PreprocessorDefinitions>
- {};%(PreprocessorDefinitions)
- </PreprocessorDefinitions>
- </ResourceCompile>
- </ItemDefinitionGroup>
- <PropertyGroup Label="Configuration">
- <PlatformToolset>{}</PlatformToolset>
- </PropertyGroup>
- </Project>
- """)
- preprocessor_definitions = ";".join([format_macro(k, v)
- for k, v in self.preprocessor_definitions.items()])
+ cxxflags, cflags, defines, sharedlinkflags, exelinkflags = self._get_extra_flags()
+ preprocessor_definitions = "".join(["%s;" % format_macro(k, v)
+ for k, v in self.preprocessor_definitions.items()])
+ defines = preprocessor_definitions + "".join("%s;" % d for d in defines)
+ self.cxxflags.extend(cxxflags)
+ self.cflags.extend(cflags)
+ self.ldflags.extend(sharedlinkflags + exelinkflags)
cppstd = "stdcpp%s" % self.cppstd if self.cppstd else ""
runtime_library = self.runtime_library
@@ -138,10 +152,21 @@ def format_macro(key, value):
"\n <ProcessorNumber>{}</ProcessorNumber>".format(njobs)])
compile_options = "".join("\n <{k}>{v}</{k}>".format(k=k, v=v)
for k, v in self.compile_options.items())
- config_props = toolchain_file.format(preprocessor_definitions, runtime_library, cppstd,
- parallel, compile_options, preprocessor_definitions,
- toolset)
+ return {
+ 'defines': defines,
+ 'compiler_flags': " ".join(self.cxxflags + self.cflags),
+ 'linker_flags': " ".join(self.ldflags),
+ "cppstd": cppstd,
+ "runtime_library": runtime_library,
+ "toolset": toolset,
+ "compile_options": compile_options,
+ "parallel": parallel
+ }
+
+ def _write_config_toolchain(self, config_filename):
config_filepath = os.path.join(self._conanfile.generators_folder, config_filename)
+ config_props = Template(self._config_toolchain_props, trim_blocks=True,
+ lstrip_blocks=True).render(**self.context_config_toolchain)
self._conanfile.output.info("MSBuildToolchain created %s" % config_filename)
save(config_filepath, config_props)
@@ -187,3 +212,12 @@ def _write_main_toolchain(self, config_filename, condition):
conan_toolchain = "\n".join(line for line in conan_toolchain.splitlines() if line.strip())
self._conanfile.output.info("MSBuildToolchain writing {}".format(self.filename))
save(main_toolchain_path, conan_toolchain)
+
+ def _get_extra_flags(self):
+ # Now, it's time to get all the flags defined by the user
+ cxxflags = self._conanfile.conf.get("tools.build:cxxflags", default=[], check_type=list)
+ cflags = self._conanfile.conf.get("tools.build:cflags", default=[], check_type=list)
+ sharedlinkflags = self._conanfile.conf.get("tools.build:sharedlinkflags", default=[], check_type=list)
+ exelinkflags = self._conanfile.conf.get("tools.build:exelinkflags", default=[], check_type=list)
+ defines = self._conanfile.conf.get("tools.build:defines", default=[], check_type=list)
+ return cxxflags, cflags, defines, sharedlinkflags, exelinkflags
| diff --git a/conans/test/functional/toolchains/microsoft/test_msbuildtoolchain.py b/conans/test/functional/toolchains/microsoft/test_msbuildtoolchain.py
new file mode 100644
index 00000000000..32fc0799bfc
--- /dev/null
+++ b/conans/test/functional/toolchains/microsoft/test_msbuildtoolchain.py
@@ -0,0 +1,50 @@
+import platform
+import textwrap
+import os
+
+import pytest
+try:
+ from unittest.mock import MagicMock
+except:
+ from mock import MagicMock
+
+from conan.tools.files import replace_in_file
+from conans.test.utils.tools import TestClient
+
+toolchain_props = """
+ <ImportGroup Label="PropertySheets">
+ <Import Project="conan\\conantoolchain_release_x64.props" />
+"""
+
+
+@pytest.mark.skipif(platform.system() not in ["Windows"], reason="Requires Windows")
+def test_msbuildtoolchain_props_with_extra_flags():
+ """
+ Real test which is injecting some compiler/linker options and other dummy defines and
+ checking that they are being processed somehow.
+
+ Expected result: everything was built successfully.
+ """
+ profile = textwrap.dedent("""\
+ include(default)
+
+ [conf]
+ tools.build:cxxflags=["/analyze:quiet"]
+ tools.build:cflags+=["/doc"]
+ tools.build:sharedlinkflags+=["/VERBOSE:UNUSEDLIBS"]
+ tools.build:exelinkflags+=["/PDB:mypdbfile"]
+ tools.build:defines+=["DEF1", "DEF2"]
+ """)
+ client = TestClient(path_with_spaces=False)
+ client.run("new hello/0.1 --template=msbuild_exe")
+ client.save({
+ "myprofile": profile
+ })
+ # Let's import manually the created conantoolchain_release_x64.props
+ replace_in_file(MagicMock(), os.path.join(client.current_folder, "hello.vcxproj"),
+ r' <ImportGroup Label="PropertySheets">', toolchain_props)
+ client.run("create . -pr myprofile -tf None")
+ assert "/analyze:quiet /doc src/hello.cpp" in client.out
+ assert r"/VERBOSE:UNUSEDLIBS /PDB:mypdbfile x64\Release\hello.obj" in client.out
+ assert "/D DEF1 /D DEF2" in client.out
+ assert "Build succeeded." in client.out
diff --git a/conans/test/integration/toolchains/microsoft/test_msbuildtoolchain.py b/conans/test/integration/toolchains/microsoft/test_msbuildtoolchain.py
new file mode 100644
index 00000000000..c7f8fe903ce
--- /dev/null
+++ b/conans/test/integration/toolchains/microsoft/test_msbuildtoolchain.py
@@ -0,0 +1,44 @@
+import os
+import textwrap
+
+from conans.test.utils.tools import TestClient
+
+
+def test_msbuildtoolchain_props_with_extra_flags():
+ """
+ Simple test checking that conantoolchain_release_x64.props is adding all the expected
+ flags and preprocessor definitions
+ """
+ profile = textwrap.dedent("""\
+ include(default)
+ [conf]
+ tools.build:cxxflags=["--flag1", "--flag2"]
+ tools.build:cflags+=["--flag3", "--flag4"]
+ tools.build:sharedlinkflags+=["--flag5"]
+ tools.build:exelinkflags+=["--flag6"]
+ tools.build:defines+=["DEF1", "DEF2"]
+ """)
+ client = TestClient(path_with_spaces=False)
+ client.run("new hello/0.1 --template=msbuild_lib")
+ client.save({
+ "myprofile": profile
+ })
+ # Local flow works
+ client.run("install . -pr myprofile -if=install")
+ toolchain = client.load(os.path.join("conan", "conantoolchain_release_x64.props"))
+ expected_cl_compile = """
+ <ClCompile>
+ <PreprocessorDefinitions>DEF1;DEF2;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>--flag1 --flag2 --flag3 --flag4 %(AdditionalOptions)</AdditionalOptions>"""
+ expected_link = """
+ <Link>
+ <AdditionalOptions>--flag5 --flag6 %(AdditionalOptions)</AdditionalOptions>
+ </Link>"""
+ expected_resource_compile = """
+ <ResourceCompile>
+ <PreprocessorDefinitions>DEF1;DEF2;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>--flag1 --flag2 --flag3 --flag4 %(AdditionalOptions)</AdditionalOptions>
+ </ResourceCompile>"""
+ assert expected_cl_compile in toolchain
+ assert expected_link in toolchain
+ assert expected_resource_compile in toolchain
diff --git a/conans/test/unittests/tools/microsoft/test_msbuild.py b/conans/test/unittests/tools/microsoft/test_msbuild.py
index 8408ff4cda1..89afb227a62 100644
--- a/conans/test/unittests/tools/microsoft/test_msbuild.py
+++ b/conans/test/unittests/tools/microsoft/test_msbuild.py
@@ -139,7 +139,9 @@ def test_resource_compile():
<PreprocessorDefinitions>
MYTEST=MYVALUE;%(PreprocessorDefinitions)
</PreprocessorDefinitions>
+ <AdditionalOptions> %(AdditionalOptions)</AdditionalOptions>
</ResourceCompile>"""
+
props_file = load(props_file) # Remove all blanks and CR to compare
props_file = "".join(s.strip() for s in props_file.splitlines())
assert "".join(s.strip() for s in expected.splitlines()) in props_file
@@ -213,3 +215,50 @@ def test_is_msvc_static_runtime(compiler, shared, runtime, build_type, expected)
"cppstd": "17"})
conanfile = MockConanfile(settings, options)
assert is_msvc_static_runtime(conanfile) == expected
+
+
+def test_msbuildtoolchain_changing_flags_via_attributes():
+ test_folder = temp_folder()
+
+ settings = Settings({"build_type": ["Release"],
+ "compiler": {"msvc": {"version": ["193"], "cppstd": ["20"]}},
+ "os": ["Windows"],
+ "arch": ["x86_64"]})
+ conanfile = ConanFile(Mock(), None)
+ conanfile.folders.set_base_generators(test_folder)
+ conanfile.folders.set_base_install(test_folder)
+ conanfile.conf = Conf()
+ conanfile.conf["tools.microsoft.msbuild:installation_path"] = "."
+ conanfile.settings = "os", "compiler", "build_type", "arch"
+ conanfile.settings_build = settings
+ conanfile.initialize(settings, EnvValues())
+ conanfile.settings.build_type = "Release"
+ conanfile.settings.compiler = "msvc"
+ conanfile.settings.compiler.version = "193"
+ conanfile.settings.compiler.cppstd = "20"
+ conanfile.settings.os = "Windows"
+ conanfile.settings.arch = "x86_64"
+
+ msbuild = MSBuildToolchain(conanfile)
+ msbuild.cxxflags.append("/flag1")
+ msbuild.cflags.append("/flag2")
+ msbuild.ldflags.append("/link1")
+ msbuild.generate()
+ toolchain = load(os.path.join(test_folder, "conantoolchain_release_x64.props"))
+
+ expected_cl_compile = """
+ <ClCompile>
+ <PreprocessorDefinitions>%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>/flag1 /flag2 %(AdditionalOptions)</AdditionalOptions>"""
+ expected_link = """
+ <Link>
+ <AdditionalOptions>/link1 %(AdditionalOptions)</AdditionalOptions>
+ </Link>"""
+ expected_resource_compile = """
+ <ResourceCompile>
+ <PreprocessorDefinitions>%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>/flag1 /flag2 %(AdditionalOptions)</AdditionalOptions>
+ </ResourceCompile>"""
+ assert expected_cl_compile in toolchain
+ assert expected_link in toolchain
+ assert expected_resource_compile in toolchain
| [
{
"components": [
{
"doc": "",
"lines": [
128,
163
],
"name": "MSBuildToolchain.context_config_toolchain",
"signature": "def context_config_toolchain(self):",
"type": "function"
},
{
"doc": "",
"lines": [
... | [
"conans/test/integration/toolchains/microsoft/test_msbuildtoolchain.py::test_msbuildtoolchain_props_with_extra_flags",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_resource_compile",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuildtoolchain_changing_flags_via_attributes"
] | [
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_cpu_count",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_toolset",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_toolset_for_intel_cc[icx-Intel",
"conans/test/unittests/tools/microsoft/test_msbu... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[MSBuildToolchain][conf] Adding extra flags to `MSBuildToolchain`
Changelog: Feature: Added `cxxflags`, `cflags`, and `ldflags` attributes to `MSBuildToolchain`.
Changelog: Feature: Added mechanism to inject extra flags to `MSBuildToolchain` via `[conf]`.
Closes: https://github.com/conan-io/conan/issues/10885
Docs: https://github.com/conan-io/docs/pull/2507
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/microsoft/toolchain.py]
(definition of MSBuildToolchain.context_config_toolchain:)
def context_config_toolchain(self):
(definition of MSBuildToolchain.context_config_toolchain.format_macro:)
def format_macro(key, value):
(definition of MSBuildToolchain._get_extra_flags:)
def _get_extra_flags(self):
[end of new definitions in conan/tools/microsoft/toolchain.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
Textualize__textual-360 | 360 | Textualize/textual | null | ca77f7e24dc3820b7c64085e346e56a3659c4a11 | 2022-04-04T19:40:01Z | diff --git a/src/textual/color.py b/src/textual/color.py
index 2154216cbd..00a51e1969 100644
--- a/src/textual/color.py
+++ b/src/textual/color.py
@@ -33,6 +33,14 @@ class HSV(NamedTuple):
v: float
+class Lab(NamedTuple):
+ """A color in CIE-L*ab format."""
+
+ L: float
+ a: float
+ b: float
+
+
RE_COLOR = re.compile(
r"""^
\#([0-9a-fA-F]{6})$|
@@ -367,6 +375,60 @@ def _get_style(self) -> Style:
)
+def rgb_to_lab(rgb: Color) -> Lab:
+ """Convert an RGB color to the CIE-L*ab format.
+
+ Uses the standard RGB color space with a D65/2⁰ standard illuminant.
+ Conversion passes through the XYZ color space.
+ Cf. http://www.easyrgb.com/en/math.php.
+ """
+
+ r, g, b = rgb.r / 255, rgb.g / 255, rgb.b / 255
+
+ r = pow((r + 0.055) / 1.055, 2.4) if r > 0.04045 else r / 12.92
+ g = pow((g + 0.055) / 1.055, 2.4) if g > 0.04045 else g / 12.92
+ b = pow((b + 0.055) / 1.055, 2.4) if b > 0.04045 else b / 12.92
+
+ x = (r * 41.24 + g * 35.76 + b * 18.05) / 95.047
+ y = (r * 21.26 + g * 71.52 + b * 7.22) / 100
+ z = (r * 1.93 + g * 11.92 + b * 95.05) / 108.883
+
+ off = 16 / 116
+ x = pow(x, 1 / 3) if x > 0.008856 else 7.787 * x + off
+ y = pow(y, 1 / 3) if y > 0.008856 else 7.787 * y + off
+ z = pow(z, 1 / 3) if z > 0.008856 else 7.787 * z + off
+
+ return Lab(116 * y - 16, 500 * (x - y), 200 * (y - z))
+
+
+def lab_to_rgb(lab: Lab) -> Color:
+ """Convert a CIE-L*ab color to RGB.
+
+ Uses the standard RGB color space with a D65/2⁰ standard illuminant.
+ Conversion passes through the XYZ color space.
+ Cf. http://www.easyrgb.com/en/math.php.
+ """
+
+ y = (lab.L + 16) / 116
+ x = lab.a / 500 + y
+ z = y - lab.b / 200
+
+ off = 16 / 116
+ y = pow(y, 3) if y > 0.2068930344 else (y - off) / 7.787
+ x = 0.95047 * pow(x, 3) if x > 0.2068930344 else 0.122059 * (x - off)
+ z = 1.08883 * pow(z, 3) if z > 0.2068930344 else 0.139827 * (z - off)
+
+ r = x * 3.2406 + y * -1.5372 + z * -0.4986
+ g = x * -0.9689 + y * 1.8758 + z * 0.0415
+ b = x * 0.0557 + y * -0.2040 + z * 1.0570
+
+ r = 1.055 * pow(r, 1 / 2.4) - 0.055 if r > 0.0031308 else 12.92 * r
+ g = 1.055 * pow(g, 1 / 2.4) - 0.055 if g > 0.0031308 else 12.92 * g
+ b = 1.055 * pow(b, 1 / 2.4) - 0.055 if b > 0.0031308 else 12.92 * b
+
+ return Color(int(r * 255), int(g * 255), int(b * 255))
+
+
if __name__ == "__main__":
from rich import print
| diff --git a/tests/test_color.py b/tests/test_color.py
index ac09625c4b..d9bfe3ca4b 100644
--- a/tests/test_color.py
+++ b/tests/test_color.py
@@ -3,7 +3,7 @@
from rich.color import Color as RichColor
from rich.text import Text
-from textual.color import Color, ColorPair
+from textual.color import Color, ColorPair, Lab, lab_to_rgb, rgb_to_lab
@pytest.mark.parametrize(
@@ -76,3 +76,52 @@ def test_hls():
assert red.hls == pytest.approx(
(0.9888888888888889, 0.43137254901960786, 0.818181818181818)
)
+
+
+# Computed with http://www.easyrgb.com/en/convert.php,
+# (r, g, b) values in sRGB to (L*, a*, b*) values in CIE-L*ab.
+RGB_LAB_DATA = [
+ (10, 23, 73, 10.245, 15.913, -32.672),
+ (200, 34, 123, 45.438, 67.750, -8.008),
+ (0, 0, 0, 0, 0, 0),
+ (255, 255, 255, 100, 0, 0),
+]
+
+
+@pytest.mark.parametrize(
+ "r, g, b, L_, a_, b_",
+ RGB_LAB_DATA,
+)
+def test_rgb_to_lab(r, g, b, L_, a_, b_):
+ """Test conversion from the RGB color space to CIE-L*ab."""
+ rgb = Color(r, g, b)
+ lab = rgb_to_lab(rgb)
+ assert lab.L == pytest.approx(L_, abs=0.1)
+ assert lab.a == pytest.approx(a_, abs=0.1)
+ assert lab.b == pytest.approx(b_, abs=0.1)
+
+
+@pytest.mark.parametrize(
+ "r, g, b, L_, a_, b_",
+ RGB_LAB_DATA,
+)
+def test_lab_to_rgb(r, g, b, L_, a_, b_):
+ """Test conversion from the CIE-L*ab color space to RGB."""
+
+ lab = Lab(L_, a_, b_)
+ rgb = lab_to_rgb(lab)
+ assert rgb.r == pytest.approx(r, abs=1)
+ assert rgb.g == pytest.approx(g, abs=1)
+ assert rgb.b == pytest.approx(b, abs=1)
+
+
+def test_rgb_lab_rgb_roundtrip():
+ """Test RGB -> CIE-L*ab -> RGB color conversion roundtripping."""
+
+ for r in range(0, 256, 4):
+ for g in range(0, 256, 4):
+ for b in range(0, 256, 4):
+ c_ = lab_to_rgb(rgb_to_lab(Color(r, g, b)))
+ assert c_.r == pytest.approx(r, abs=1)
+ assert c_.g == pytest.approx(g, abs=1)
+ assert c_.b == pytest.approx(b, abs=1)
| [
{
"components": [
{
"doc": "A color in CIE-L*ab format.",
"lines": [
36,
41
],
"name": "Lab",
"signature": "class Lab(NamedTuple):",
"type": "class"
},
{
"doc": "Convert an RGB color to the CIE-L*ab format.\n\nUses the... | [
"tests/test_color.py::test_parse[#000000-expected0]",
"tests/test_color.py::test_parse[#ffffff-expected1]",
"tests/test_color.py::test_parse[#FFFFFF-expected2]",
"tests/test_color.py::test_parse[#020304ff-expected3]",
"tests/test_color.py::test_parse[#02030400-expected4]",
"tests/test_color.py::test_parse... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add conversion to/from the CIE-L*ab color space.
This adds a simple `Lab` class to represent colours in the CIE-L*ab colour space and adds two functions to allow for RGB conversion to and from that space.
Tests are still being written.
Formulas as seen in https://stackoverflow.com/a/8433985/2828287.
Tests use the calculator available in http://www.easyrgb.com/en/convert.php.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/textual/color.py]
(definition of Lab:)
class Lab(NamedTuple):
"""A color in CIE-L*ab format."""
(definition of rgb_to_lab:)
def rgb_to_lab(rgb: Color) -> Lab:
"""Convert an RGB color to the CIE-L*ab format.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php."""
(definition of lab_to_rgb:)
def lab_to_rgb(lab: Lab) -> Color:
"""Convert a CIE-L*ab color to RGB.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php."""
[end of new definitions in src/textual/color.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 86e93536b991014e0ea4bf993068202b446bb698 | ||
Project-MONAI__MONAI-4061 | 4,061 | Project-MONAI/MONAI | null | c5bf12021a68fc02cbb8f0c91bfd73fe6245d4c4 | 2022-04-04T12:31:13Z | diff --git a/docs/source/data.rst b/docs/source/data.rst
index 2bdf401c7f..e76eb53f39 100644
--- a/docs/source/data.rst
+++ b/docs/source/data.rst
@@ -107,16 +107,23 @@ Patch-based dataset
.. autoclass:: GridPatchDataset
:members:
-`PatchIter`
-~~~~~~~~~~~
-.. autoclass:: PatchIter
- :members:
-
`PatchDataset`
~~~~~~~~~~~~~~
.. autoclass:: PatchDataset
:members:
+`PatchIter`
+"""""""""""
+.. autoclass:: PatchIter
+ :members:
+ :special-members: __call__
+
+`PatchIterd`
+""""""""""""
+.. autoclass:: PatchIterd
+ :members:
+ :special-members: __call__
+
Image reader
------------
diff --git a/monai/data/__init__.py b/monai/data/__init__.py
index bed194d2f4..53aa3d3f46 100644
--- a/monai/data/__init__.py
+++ b/monai/data/__init__.py
@@ -32,7 +32,7 @@
load_decathlon_properties,
)
from .folder_layout import FolderLayout
-from .grid_dataset import GridPatchDataset, PatchDataset, PatchIter
+from .grid_dataset import GridPatchDataset, PatchDataset, PatchIter, PatchIterd
from .image_dataset import ImageDataset
from .image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader, WSIReader
from .image_writer import (
diff --git a/monai/data/grid_dataset.py b/monai/data/grid_dataset.py
index 9eb84a58c9..33497b5a68 100644
--- a/monai/data/grid_dataset.py
+++ b/monai/data/grid_dataset.py
@@ -9,21 +9,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Callable, Dict, Iterable, Optional, Sequence, Union
+from copy import deepcopy
+from typing import Callable, Dict, Hashable, Iterable, Mapping, Optional, Sequence, Union
+import numpy as np
+
+from monai.config import KeysCollection
from monai.data.dataset import Dataset
from monai.data.iterable_dataset import IterableDataset
from monai.data.utils import iter_patch
from monai.transforms import apply_transform
-from monai.utils import NumpyPadMode, deprecated_arg, ensure_tuple, look_up_option
+from monai.utils import NumpyPadMode, deprecated_arg, ensure_tuple, first, look_up_option
-__all__ = ["PatchDataset", "GridPatchDataset", "PatchIter"]
+__all__ = ["PatchDataset", "GridPatchDataset", "PatchIter", "PatchIterd"]
class PatchIter:
"""
- A class to return a patch generator with predefined properties such as `patch_size`.
+ Return a patch generator with predefined properties such as `patch_size`.
Typically used with :py:class:`monai.data.GridPatchDataset`.
+
"""
def __init__(
@@ -42,7 +47,8 @@ def __init__(
``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
One of the listed string values or a user supplied function. Defaults to ``"wrap"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
- pad_opts: padding options, see numpy.pad
+ pad_opts: other arguments for the `np.pad` function.
+ note that `np.pad` treats channel dimension as the first dimension.
Note:
The `patch_size` is the size of the
@@ -52,19 +58,20 @@ def __init__(
specified by a `patch_size` of (10, 10, 10).
"""
- self.patch_size = (None,) + tuple(patch_size)
+ self.patch_size = (None,) + tuple(patch_size) # expand to have the channel dim
self.start_pos = ensure_tuple(start_pos)
self.mode: NumpyPadMode = look_up_option(mode, NumpyPadMode)
self.pad_opts = pad_opts
- def __call__(self, array):
+ def __call__(self, array: np.ndarray):
"""
Args:
array: the image to generate patches from.
+
"""
yield from iter_patch(
array,
- patch_size=self.patch_size, # expand to have the channel dim
+ patch_size=self.patch_size, # type: ignore
start_pos=self.start_pos,
copy_back=False,
mode=self.mode,
@@ -72,17 +79,68 @@ def __call__(self, array):
)
+class PatchIterd:
+ """
+ Dictionary-based wrapper of :py:class:`monai.data.PatchIter`.
+ Return a patch generator for dictionary data and the coordinate, Typically used
+ with :py:class:`monai.data.GridPatchDataset`.
+ Suppose all the expected fields specified by `keys` have same shape.
+
+ Args:
+ keys: keys of the corresponding items to iterate patches.
+ patch_size: size of patches to generate slices for, 0/None selects whole dimension
+ start_pos: starting position in the array, default is 0 for each dimension
+ mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
+ ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
+ One of the listed string values or a user supplied function. Defaults to ``"wrap"``.
+ See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
+ pad_opts: other arguments for the `np.pad` function.
+ note that `np.pad` treats channel dimension as the first dimension.
+
+ """
+
+ coords_key = "patch_coords"
+ original_spatial_shape_key = "original_spatial_shape"
+ start_pos_key = "start_pos"
+
+ def __init__(
+ self,
+ keys: KeysCollection,
+ patch_size: Sequence[int],
+ start_pos: Sequence[int] = (),
+ mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP,
+ **pad_opts,
+ ):
+ self.keys = ensure_tuple(keys)
+ self.patch_iter = PatchIter(patch_size=patch_size, start_pos=start_pos, mode=mode, **pad_opts)
+
+ def __call__(self, data: Mapping[Hashable, np.ndarray]):
+ d = dict(data)
+ original_spatial_shape = d[first(self.keys)].shape[1:]
+
+ for patch in zip(*[self.patch_iter(d[key]) for key in self.keys]):
+ coords = patch[0][1] # use the coordinate of the first item
+ ret = {k: v[0] for k, v in zip(self.keys, patch)}
+ # fill in the extra keys with unmodified data
+ for k in set(d.keys()).difference(set(self.keys)):
+ ret[k] = deepcopy(d[k])
+ # also store the `coordinate`, `spatial shape of original image`, `start position` in the dictionary
+ ret[self.coords_key] = coords
+ ret[self.original_spatial_shape_key] = original_spatial_shape
+ ret[self.start_pos_key] = self.patch_iter.start_pos
+ yield ret, coords
+
+
class GridPatchDataset(IterableDataset):
"""
- Yields patches from images read from an image dataset.
- Typically used with `PatchIter` so that the patches are chosen in a contiguous grid sampling scheme.
+ Yields patches from data read from an image dataset.
+ Typically used with `PatchIter` or `PatchIterd` so that the patches are chosen in a contiguous grid sampling scheme.
.. code-block:: python
import numpy as np
- from monai.data import GridPatchDataset, DataLoader, PatchIter
- from monai.transforms import RandShiftIntensity
+ from monai.data import GridPatchDataset, DataLoader, PatchIter, RandShiftIntensity
# image-level dataset
images = [np.arange(16, dtype=float).reshape(1, 4, 4),
@@ -109,7 +167,7 @@ class GridPatchDataset(IterableDataset):
data: the data source to read image data from.
patch_iter: converts an input image (item from dataset) into a iterable of image patches.
`patch_iter(dataset[idx])` must yield a tuple: (patches, coordinates).
- see also: :py:class:`monai.data.PatchIter`.
+ see also: :py:class:`monai.data.PatchIter` or :py:class:`monai.data.PatchIterd`.
transform: a callable data transform operates on the patches.
with_coordinates: whether to yield the coordinates of each patch, default to `True`.
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py
index 19ebe40b46..79f4040018 100644
--- a/monai/transforms/croppad/dictionary.py
+++ b/monai/transforms/croppad/dictionary.py
@@ -70,6 +70,7 @@
"RandCropByPosNegLabeld",
"ResizeWithPadOrCropd",
"BoundingRectd",
+ "RandCropByLabelClassesd",
"SpatialPadD",
"SpatialPadDict",
"BorderPadD",
@@ -98,7 +99,6 @@
"ResizeWithPadOrCropDict",
"BoundingRectD",
"BoundingRectDict",
- "RandCropByLabelClassesd",
"RandCropByLabelClassesD",
"RandCropByLabelClassesDict",
]
| diff --git a/tests/test_grid_dataset.py b/tests/test_grid_dataset.py
index 9361d82cdf..4d2d0d6948 100644
--- a/tests/test_grid_dataset.py
+++ b/tests/test_grid_dataset.py
@@ -14,8 +14,8 @@
import numpy as np
-from monai.data import DataLoader, GridPatchDataset, PatchIter
-from monai.transforms import RandShiftIntensity
+from monai.data import DataLoader, GridPatchDataset, PatchIter, PatchIterd
+from monai.transforms import RandShiftIntensity, RandShiftIntensityd
from monai.utils import set_determinism
@@ -76,6 +76,48 @@ def test_loading_array(self):
item[1], np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]), rtol=1e-5
)
+ def test_loading_dict(self):
+ set_determinism(seed=1234)
+ # test sequence input data with dict
+ data = [
+ {
+ "image": np.arange(16, dtype=float).reshape(1, 4, 4),
+ "label": np.arange(16, dtype=float).reshape(1, 4, 4),
+ "metadata": "test string",
+ },
+ {
+ "image": np.arange(16, dtype=float).reshape(1, 4, 4),
+ "label": np.arange(16, dtype=float).reshape(1, 4, 4),
+ "metadata": "test string",
+ },
+ ]
+ # image level
+ patch_intensity = RandShiftIntensityd(keys="image", offsets=1.0, prob=1.0)
+ patch_iter = PatchIterd(keys=["image", "label"], patch_size=(2, 2), start_pos=(0, 0))
+ ds = GridPatchDataset(data=data, patch_iter=patch_iter, transform=patch_intensity, with_coordinates=True)
+ # use the grid patch dataset
+ for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=0):
+ np.testing.assert_equal(item[0]["image"].shape, (2, 1, 2, 2))
+ np.testing.assert_equal(item[0]["label"].shape, (2, 1, 2, 2))
+ self.assertListEqual(item[0]["metadata"], ["test string", "test string"])
+ np.testing.assert_allclose(
+ item[0]["image"],
+ np.array([[[[1.4965, 2.4965], [5.4965, 6.4965]]], [[[11.3584, 12.3584], [15.3584, 16.3584]]]]),
+ rtol=1e-4,
+ )
+ np.testing.assert_allclose(item[1], np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]), rtol=1e-5)
+ if sys.platform != "win32":
+ for item in DataLoader(ds, batch_size=2, shuffle=False, num_workers=2):
+ np.testing.assert_equal(item[0]["image"].shape, (2, 1, 2, 2))
+ np.testing.assert_allclose(
+ item[0]["image"],
+ np.array([[[[1.2548, 2.2548], [5.2548, 6.2548]]], [[[9.1106, 10.1106], [13.1106, 14.1106]]]]),
+ rtol=1e-3,
+ )
+ np.testing.assert_allclose(
+ item[1], np.array([[[0, 1], [0, 2], [2, 4]], [[0, 1], [2, 4], [2, 4]]]), rtol=1e-5
+ )
+
if __name__ == "__main__":
unittest.main()
| diff --git a/docs/source/data.rst b/docs/source/data.rst
index 2bdf401c7f..e76eb53f39 100644
--- a/docs/source/data.rst
+++ b/docs/source/data.rst
@@ -107,16 +107,23 @@ Patch-based dataset
.. autoclass:: GridPatchDataset
:members:
-`PatchIter`
-~~~~~~~~~~~
-.. autoclass:: PatchIter
- :members:
-
`PatchDataset`
~~~~~~~~~~~~~~
.. autoclass:: PatchDataset
:members:
+`PatchIter`
+"""""""""""
+.. autoclass:: PatchIter
+ :members:
+ :special-members: __call__
+
+`PatchIterd`
+""""""""""""
+.. autoclass:: PatchIterd
+ :members:
+ :special-members: __call__
+
Image reader
------------
| [
{
"components": [
{
"doc": "Dictionary-based wrapper of :py:class:`monai.data.PatchIter`.\nReturn a patch generator for dictionary data and the coordinate, Typically used\nwith :py:class:`monai.data.GridPatchDataset`.\nSuppose all the expected fields specified by `keys` have same shape.\n\nArgs:\n... | [
"tests/test_grid_dataset.py::TestGridPatchDataset::test_loading_array",
"tests/test_grid_dataset.py::TestGridPatchDataset::test_loading_dict",
"tests/test_grid_dataset.py::TestGridPatchDataset::test_shape"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
4060 Add PatchIter and PatchIterd transform
Fixes #4060 .
### Description
This PR added `PatchIter` and `PatchIterd` transform according to user's feedback.
### Status
**Ready**
### Types of changes
<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->
- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests --disttests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/` folder.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in monai/data/grid_dataset.py]
(definition of PatchIterd:)
class PatchIterd:
"""Dictionary-based wrapper of :py:class:`monai.data.PatchIter`.
Return a patch generator for dictionary data and the coordinate, Typically used
with :py:class:`monai.data.GridPatchDataset`.
Suppose all the expected fields specified by `keys` have same shape.
Args:
keys: keys of the corresponding items to iterate patches.
patch_size: size of patches to generate slices for, 0/None selects whole dimension
start_pos: starting position in the array, default is 0 for each dimension
mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
One of the listed string values or a user supplied function. Defaults to ``"wrap"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
pad_opts: other arguments for the `np.pad` function.
note that `np.pad` treats channel dimension as the first dimension."""
(definition of PatchIterd.__init__:)
def __init__( self, keys: KeysCollection, patch_size: Sequence[int], start_pos: Sequence[int] = (), mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP, **pad_opts, ):
(definition of PatchIterd.__call__:)
def __call__(self, data: Mapping[Hashable, np.ndarray]):
[end of new definitions in monai/data/grid_dataset.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add a `dict` version `PatchIterd`
**Is your feature request related to a problem? Please describe.**
Thanks for user's feedback in https://github.com/Project-MONAI/MONAI/discussions/4058, it would be nice to add a dict version `PatchIterd` with expected `keys` for the `GridPatchDataset` dataset.
----------
--------------------
</issues> | e73257caa79309dcce1e93abf1632f4bfd75b11f |
pylint-dev__pylint-6131 | 6,131 | pylint-dev/pylint | 2.14 | d9b74d913e46ba36f4dbe21be5d6d3c719a04824 | 2022-04-02T11:26:40Z | diff --git a/doc/data/messages/c/confusing-consecutive-elif/bad.py b/doc/data/messages/c/confusing-consecutive-elif/bad.py
new file mode 100644
index 0000000000..93e1e2dee8
--- /dev/null
+++ b/doc/data/messages/c/confusing-consecutive-elif/bad.py
@@ -0,0 +1,6 @@
+def myfunc(shall_continue: bool, shall_exit: bool):
+ if shall_continue:
+ if input("Are you sure?") == "y":
+ print("Moving on.")
+ elif shall_exit: # [confusing-consecutive-elif]
+ print("Exiting.")
diff --git a/doc/data/messages/c/confusing-consecutive-elif/details.rst b/doc/data/messages/c/confusing-consecutive-elif/details.rst
new file mode 100644
index 0000000000..bd2ecc4ee5
--- /dev/null
+++ b/doc/data/messages/c/confusing-consecutive-elif/details.rst
@@ -0,0 +1,1 @@
+Creating a function for the nested conditional, or adding an explicit ``else`` in the indented ``if`` statement, even if it only contains a ``pass`` statement, can help clarify the code.
diff --git a/doc/data/messages/c/confusing-consecutive-elif/good.py b/doc/data/messages/c/confusing-consecutive-elif/good.py
new file mode 100644
index 0000000000..1722a6bfa6
--- /dev/null
+++ b/doc/data/messages/c/confusing-consecutive-elif/good.py
@@ -0,0 +1,22 @@
+# Option 1: add explicit 'else'
+def myfunc(shall_continue: bool, shall_exit: bool):
+ if shall_continue:
+ if input("Are you sure?") == "y":
+ print("Moving on.")
+ else:
+ pass
+ elif shall_exit:
+ print("Exiting.")
+
+
+# Option 2: extract function
+def user_confirmation():
+ if input("Are you sure?") == "y":
+ print("Moving on.")
+
+
+def myfunc2(shall_continue: bool, shall_exit: bool):
+ if shall_continue:
+ user_confirmation()
+ elif shall_exit:
+ print("Exiting.")
diff --git a/doc/data/messages/c/confusing-consecutive-elif/pylintrc b/doc/data/messages/c/confusing-consecutive-elif/pylintrc
new file mode 100644
index 0000000000..6a11b2c099
--- /dev/null
+++ b/doc/data/messages/c/confusing-consecutive-elif/pylintrc
@@ -0,0 +1,2 @@
+[MASTER]
+load-plugins=pylint.extensions.confusing_elif
| diff --git a/doc/test_messages_documentation.py b/doc/test_messages_documentation.py
index e131d318ba..f80542f07d 100644
--- a/doc/test_messages_documentation.py
+++ b/doc/test_messages_documentation.py
@@ -7,7 +7,7 @@
from collections import Counter
from pathlib import Path
from typing import Counter as CounterType
-from typing import List, TextIO, Tuple
+from typing import List, Optional, TextIO, Tuple
import pytest
@@ -53,7 +53,13 @@ def __init__(self, test_file: Tuple[str, Path]) -> None:
self._linter.config.persistent = 0
checkers.initialize(self._linter)
- config_file = next(config.find_default_config_files(), None)
+ # Check if this message has a custom configuration file (e.g. for enabling optional checkers).
+ # If not, use the default configuration.
+ config_file: Optional[Path]
+ if (test_file[1].parent / "pylintrc").exists():
+ config_file = test_file[1].parent / "pylintrc"
+ else:
+ config_file = next(config.find_default_config_files(), None)
_config_initialization(
self._linter,
| diff --git a/doc/data/messages/c/confusing-consecutive-elif/details.rst b/doc/data/messages/c/confusing-consecutive-elif/details.rst
new file mode 100644
index 0000000000..bd2ecc4ee5
--- /dev/null
+++ b/doc/data/messages/c/confusing-consecutive-elif/details.rst
@@ -0,0 +1,1 @@
+Creating a function for the nested conditional, or adding an explicit ``else`` in the indented ``if`` statement, even if it only contains a ``pass`` statement, can help clarify the code.
diff --git a/doc/data/messages/c/confusing-consecutive-elif/pylintrc b/doc/data/messages/c/confusing-consecutive-elif/pylintrc
new file mode 100644
index 0000000000..6a11b2c099
--- /dev/null
+++ b/doc/data/messages/c/confusing-consecutive-elif/pylintrc
@@ -0,0 +1,2 @@
+[MASTER]
+load-plugins=pylint.extensions.confusing_elif
| [
{
"components": [
{
"doc": "",
"lines": [
1,
6
],
"name": "myfunc",
"signature": "def myfunc(shall_continue: bool, shall_exit: bool):",
"type": "function"
}
],
"file": "doc/data/messages/c/confusing-consecutive-elif/bad.py"
... | [
"doc/test_messages_documentation.py::test_code_examples[confusing-consecutive-elif-good]",
"doc/test_messages_documentation.py::test_code_examples[confusing-consecutive-elif-bad]"
] | [
"doc/test_messages_documentation.py::test_code_examples[assert-on-tuple-good]",
"doc/test_messages_documentation.py::test_code_examples[assert-on-tuple-bad]",
"doc/test_messages_documentation.py::test_code_examples[arguments-differ-good]",
"doc/test_messages_documentation.py::test_code_examples[arguments-diff... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Enable usage of custom pylintrc file for message documentation tests
To ease the process of reviewing your PR, do make sure to complete the following boxes.
- [x] Write a good description on what the PR does.
- [x] If you used multiple emails or multiple names when contributing, add your mails
and preferred name in ``script/.contributors_aliases.json``
## Type of Changes
<!-- Leave the corresponding lines for the applicable type of change: -->
| | Type |
| --- | ---------------------- |
| | :bug: Bug fix |
| ✓ | :sparkles: New feature |
| | :hammer: Refactoring |
| ✓ | :scroll: Docs |
## Description
In order to test some messages (e.g. for messages of optional checkers), we need a way to use a custom config.
With this modification it is possible to place a ``pylintrc`` file next to ``good.py`` and ``bad.py``.
To prove it works I added a documentation example for one of the optional checkers.
Ref: https://github.com/PyCQA/pylint/issues/5953#issuecomment-1085587049
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in doc/data/messages/c/confusing-consecutive-elif/bad.py]
(definition of myfunc:)
def myfunc(shall_continue: bool, shall_exit: bool):
[end of new definitions in doc/data/messages/c/confusing-consecutive-elif/bad.py]
[start of new definitions in doc/data/messages/c/confusing-consecutive-elif/good.py]
(definition of myfunc:)
def myfunc(shall_continue: bool, shall_exit: bool):
(definition of user_confirmation:)
def user_confirmation():
(definition of myfunc2:)
def myfunc2(shall_continue: bool, shall_exit: bool):
[end of new definitions in doc/data/messages/c/confusing-consecutive-elif/good.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 055c1920140ebf36501d30307d5ab86965ae185f | |
conan-io__conan-10906 | 10,906 | conan-io/conan | null | 7c5f0af344ff775d6bbd39795f056f4774d17984 | 2022-03-28T15:44:19Z | diff --git a/conan/tools/apple/xcodedeps.py b/conan/tools/apple/xcodedeps.py
index 74fbc4ae582..bae9fc71caa 100644
--- a/conan/tools/apple/xcodedeps.py
+++ b/conan/tools/apple/xcodedeps.py
@@ -39,14 +39,15 @@ def _xcconfig_conditional(settings):
return "[config={}][arch={}][sdk={}]".format(settings.get_safe("build_type"), architecture, sdk_condition)
-def _add_include_to_file_or_create(filename, template, include):
+def _add_includes_to_file_or_create(filename, template, files_to_include):
if os.path.isfile(filename):
content = load(filename)
else:
content = template
- if include not in content:
- content = content + '#include "{}"\n'.format(include)
+ for include in files_to_include:
+ if include not in content:
+ content = content + '#include "{}"\n'.format(include)
return content
@@ -194,9 +195,9 @@ def _all_xconfig_file(self, deps):
@property
def _global_xconfig_content(self):
- return _add_include_to_file_or_create(GLOBAL_XCCONFIG_FILENAME,
- GLOBAL_XCCONFIG_TEMPLATE,
- self.general_name)
+ return _add_includes_to_file_or_create(GLOBAL_XCCONFIG_FILENAME,
+ GLOBAL_XCCONFIG_TEMPLATE,
+ [self.general_name])
def _content(self):
result = {}
diff --git a/conan/tools/apple/xcodetoolchain.py b/conan/tools/apple/xcodetoolchain.py
index 7e830dd186a..bc08b2a3ec2 100644
--- a/conan/tools/apple/xcodetoolchain.py
+++ b/conan/tools/apple/xcodetoolchain.py
@@ -1,11 +1,10 @@
-import os
import textwrap
from conan.tools._check_build_profile import check_using_build_profile
from conan.tools._compilers import cppstd_flag
from conan.tools.apple.apple import to_apple_arch
from conan.tools.apple.xcodedeps import GLOBAL_XCCONFIG_FILENAME, GLOBAL_XCCONFIG_TEMPLATE, \
- _add_include_to_file_or_create, _xcconfig_settings_filename, _xcconfig_conditional
+ _add_includes_to_file_or_create, _xcconfig_settings_filename, _xcconfig_conditional
from conans.util.files import save
@@ -20,6 +19,14 @@ class XcodeToolchain(object):
{clang_cxx_language_standard}
""")
+ _flags_xconfig = textwrap.dedent("""\
+ // Global flags
+ {defines}
+ {cflags}
+ {cppflags}
+ {ldflags}
+ """)
+
_agreggated_xconfig = textwrap.dedent("""\
// Conan XcodeToolchain generated file
// Includes all installed configurations
@@ -35,12 +42,20 @@ def __init__(self, conanfile):
self.sdk_version = conanfile.settings.get_safe("os.sdk_version")
self.libcxx = conanfile.settings.get_safe("compiler.libcxx")
self.os_version = conanfile.settings.get_safe("os.version")
+ self._global_defines = self._conanfile.conf.get("tools.build:defines", default=[], check_type=list)
+ self._global_cxxflags = self._conanfile.conf.get("tools.build:cxxflags", default=[], check_type=list)
+ self._global_cflags = self._conanfile.conf.get("tools.build:cflags", default=[], check_type=list)
+ sharedlinkflags = self._conanfile.conf.get("tools.build:sharedlinkflags", default=[], check_type=list)
+ exelinkflags = self._conanfile.conf.get("tools.build:exelinkflags", default=[], check_type=list)
+ self._global_ldflags = sharedlinkflags + exelinkflags
check_using_build_profile(self._conanfile)
def generate(self):
- save(GLOBAL_XCCONFIG_FILENAME, self._global_xconfig_content)
save(self._agreggated_xconfig_filename, self._agreggated_xconfig_content)
save(self._vars_xconfig_filename, self._vars_xconfig_content)
+ if self._check_if_extra_flags:
+ save(self._flags_xcconfig_filename, self._flags_xcconfig_content)
+ save(GLOBAL_XCCONFIG_FILENAME, self._global_xconfig_content)
@property
def _cppstd(self):
@@ -77,16 +92,36 @@ def _vars_xconfig_content(self):
@property
def _agreggated_xconfig_content(self):
- return _add_include_to_file_or_create(self._agreggated_xconfig_filename,
- self._agreggated_xconfig,
- self._vars_xconfig_filename)
+ return _add_includes_to_file_or_create(self._agreggated_xconfig_filename,
+ self._agreggated_xconfig,
+ [self._vars_xconfig_filename])
@property
def _global_xconfig_content(self):
- return _add_include_to_file_or_create(GLOBAL_XCCONFIG_FILENAME,
- GLOBAL_XCCONFIG_TEMPLATE,
- self._agreggated_xconfig_filename)
+ files_to_include = [self._agreggated_xconfig_filename]
+ if self._check_if_extra_flags:
+ files_to_include.append(self._flags_xcconfig_filename)
+ content = _add_includes_to_file_or_create(GLOBAL_XCCONFIG_FILENAME, GLOBAL_XCCONFIG_TEMPLATE,
+ files_to_include)
+ return content
@property
def _agreggated_xconfig_filename(self):
return self.filename + self.extension
+
+ @property
+ def _check_if_extra_flags(self):
+ return self._global_cflags or self._global_cxxflags or self._global_ldflags
+
+ @property
+ def _flags_xcconfig_content(self):
+ defines = "GCC_PREPROCESSOR_DEFINITIONS = $(inherited) {}".format(" ".join(self._global_defines)) if self._global_defines else ""
+ cflags = "OTHER_CFLAGS = $(inherited) {}".format(" ".join(self._global_cflags)) if self._global_cflags else ""
+ cppflags = "OTHER_CPLUSPLUSFLAGS = $(inherited) {}".format(" ".join(self._global_cxxflags)) if self._global_cxxflags else ""
+ ldflags = "OTHER_LDFLAGS = $(inherited) {}".format(" ".join(self._global_ldflags)) if self._global_ldflags else ""
+ ret = self._flags_xconfig.format(defines=defines, cflags=cflags, cppflags=cppflags, ldflags=ldflags)
+ return ret
+
+ @property
+ def _flags_xcconfig_filename(self):
+ return "conan_global_flags" + self.extension
| diff --git a/conans/test/functional/toolchains/apple/test_xcodetoolchain.py b/conans/test/functional/toolchains/apple/test_xcodetoolchain.py
index 29772e794d8..bf68e54ab79 100644
--- a/conans/test/functional/toolchains/apple/test_xcodetoolchain.py
+++ b/conans/test/functional/toolchains/apple/test_xcodetoolchain.py
@@ -44,6 +44,7 @@
4130DB6627BE8D0300BDEE84 /* conan_config.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_config.xcconfig; sourceTree = "<group>"; };
4130DB6727BE8D0300BDEE84 /* conantoolchain.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conantoolchain.xcconfig; sourceTree = "<group>"; };
4130DB6827BE8D0300BDEE84 /* conandeps.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conandeps.xcconfig; sourceTree = "<group>"; };
+ 416ED66527F1FFAE00664526 /* conan_global_flags.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = conan_global_flags.xcconfig; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@@ -85,6 +86,7 @@
4130DB6927BE8D0D00BDEE84 /* conan */ = {
isa = PBXGroup;
children = (
+ 416ED66527F1FFAE00664526 /* conan_global_flags.xcconfig */,
4130DB6627BE8D0300BDEE84 /* conan_config.xcconfig */,
4130DB6527BE8D0300BDEE84 /* conan_hello_debug_x86_64_macosx_12_1.xcconfig */,
4130DB5F27BE8D0300BDEE84 /* conan_hello_release_x86_64_macosx_12_1.xcconfig */,
@@ -372,7 +374,8 @@ def package_info(self):
sdk_version = "11.3"
settings = "-s arch=x86_64 -s os.sdk=macosx -s os.sdk_version={} -s compiler.cppstd={} " \
- "-s compiler.libcxx=libc++ -s os.version={} ".format(sdk_version, cppstd, min_version)
+ "-s compiler.libcxx=libc++ -s os.version={} " \
+ "-c 'tools.build:cflags=[\"-fstack-protector-strong\"]'".format(sdk_version, cppstd, min_version)
client.run("create . -s build_type=Release {} --build=missing".format(settings))
assert "main __x86_64__ defined" in client.out
@@ -380,3 +383,4 @@ def package_info(self):
assert "minos {}".format(min_version) in client.out
assert "sdk {}".format(sdk_version) in client.out
assert "libc++" in client.out
+ assert " -fstack-protector-strong -" in client.out
diff --git a/conans/test/integration/toolchains/apple/test_xcodetoolchain.py b/conans/test/integration/toolchains/apple/test_xcodetoolchain.py
index b38a8419ed1..4aef8e16d66 100644
--- a/conans/test/integration/toolchains/apple/test_xcodetoolchain.py
+++ b/conans/test/integration/toolchains/apple/test_xcodetoolchain.py
@@ -53,3 +53,21 @@ def test_toolchain_files(configuration, os_version, cppstd, libcxx, arch, sdk_na
assert 'MACOSX_DEPLOYMENT_TARGET{}={}'.format(condition, os_version) in toolchain_vars
if cppstd:
assert 'CLANG_CXX_LANGUAGE_STANDARD{}={}'.format(condition, clang_cppstd) in toolchain_vars
+
+
+def test_toolchain_flags():
+ client = TestClient()
+ client.save({"conanfile.txt": "[generators]\nXcodeToolchain\n"})
+ cmd = "install . -c 'tools.build:cxxflags=[\"flag1\"]' " \
+ "-c 'tools.build:defines=[\"MYDEFINITION\"]' " \
+ "-c 'tools.build:cflags=[\"flag2\"]' " \
+ "-c 'tools.build:sharedlinkflags=[\"flag3\"]' " \
+ "-c 'tools.build:exelinkflags=[\"flag4\"]'"
+ client.run(cmd)
+ conan_global_flags = client.load("conan_global_flags.xcconfig")
+ assert "GCC_PREPROCESSOR_DEFINITIONS = $(inherited) MYDEFINITION" in conan_global_flags
+ assert "OTHER_CFLAGS = $(inherited) flag2" in conan_global_flags
+ assert "OTHER_CPLUSPLUSFLAGS = $(inherited) flag1" in conan_global_flags
+ assert "OTHER_LDFLAGS = $(inherited) flag3 flag4" in conan_global_flags
+ conan_global_file = client.load("conan_config.xcconfig")
+ assert '#include "conan_global_flags.xcconfig"' in conan_global_file
| [
{
"components": [
{
"doc": "",
"lines": [
42,
52
],
"name": "_add_includes_to_file_or_create",
"signature": "def _add_includes_to_file_or_create(filename, template, files_to_include):",
"type": "function"
}
],
"file": "conan... | [
"conans/test/integration/toolchains/apple/test_xcodetoolchain.py::test_toolchain_flags"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add extra flags via [conf] into XcodeToolchain
Changelog: Feature: Add extra flags via [conf] into XcodeToolchain.
Docs: https://github.com/conan-io/docs/pull/2471
Related to: https://github.com/conan-io/conan/pull/10928
Closes: https://github.com/conan-io/conan/issues/10886
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/apple/xcodedeps.py]
(definition of _add_includes_to_file_or_create:)
def _add_includes_to_file_or_create(filename, template, files_to_include):
[end of new definitions in conan/tools/apple/xcodedeps.py]
[start of new definitions in conan/tools/apple/xcodetoolchain.py]
(definition of XcodeToolchain._check_if_extra_flags:)
def _check_if_extra_flags(self):
(definition of XcodeToolchain._flags_xcconfig_content:)
def _flags_xcconfig_content(self):
(definition of XcodeToolchain._flags_xcconfig_filename:)
def _flags_xcconfig_filename(self):
[end of new definitions in conan/tools/apple/xcodetoolchain.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-10874 | 10,874 | conan-io/conan | null | acf5d9a460adfa16bf95b6aadfc248f7d9ace643 | 2022-03-24T11:52:39Z | diff --git a/conan/tools/files/__init__.py b/conan/tools/files/__init__.py
index fcffac80e2e..01f5d8c67ae 100644
--- a/conan/tools/files/__init__.py
+++ b/conan/tools/files/__init__.py
@@ -1,4 +1,4 @@
-from conan.tools.files.files import load, save, mkdir, ftp_download, download, get, rename, \
+from conan.tools.files.files import load, save, mkdir, rmdir, ftp_download, download, get, rename, \
chdir, unzip, replace_in_file, collect_libs, check_md5, check_sha1, check_sha256
from conan.tools.files.patches import patch, apply_conandata_patches
diff --git a/conan/tools/files/files.py b/conan/tools/files/files.py
index 07b1a1e9333..2e331a76aea 100644
--- a/conan/tools/files/files.py
+++ b/conan/tools/files/files.py
@@ -15,7 +15,7 @@
from conan.tools import CONAN_TOOLCHAIN_ARGS_FILE, CONAN_TOOLCHAIN_ARGS_SECTION
from conans.client.downloaders.download import run_downloader
from conans.errors import ConanException
-from conans.util.files import rmdir
+from conans.util.files import rmdir as _internal_rmdir
if six.PY3: # Remove this IF in develop2
from shutil import which
@@ -61,6 +61,10 @@ def mkdir(conanfile, path):
os.makedirs(path)
+def rmdir(conanfile, path):
+ _internal_rmdir(path)
+
+
def get(conanfile, url, md5='', sha1='', sha256='', destination=".", filename="",
keep_permissions=False, pattern=None, verify=True, retry=None, retry_wait=None,
auth=None, headers=None, strip_root=False):
@@ -507,7 +511,7 @@ def swap_child_folder(parent_folder, child_folder):
if os.path.isfile(path):
os.remove(path)
else:
- rmdir(path)
+ _internal_rmdir(path)
child = os.path.join(parent_folder, child_folder)
for f in os.listdir(child):
shutil.move(os.path.join(child, f), os.path.join(parent_folder, f))
| diff --git a/conans/test/integration/tools/file_tools_test.py b/conans/test/integration/tools/file_tools_test.py
new file mode 100644
index 00000000000..b7d3b80b846
--- /dev/null
+++ b/conans/test/integration/tools/file_tools_test.py
@@ -0,0 +1,30 @@
+import os
+import textwrap
+
+from conans.test.utils.tools import TestClient
+
+
+def test_file_tools():
+
+ conanfile = textwrap.dedent("""
+
+ from conan import ConanFile
+ from conan.tools.files import rmdir, mkdir
+
+ class pkg(ConanFile):
+
+ def layout(self):
+ self.folders.generators = "gen"
+
+ def generate(self):
+ mkdir(self, "folder1")
+ mkdir(self, "folder2")
+ rmdir(self, "folder2")
+
+ """)
+
+ client = TestClient()
+ client.save({"conanfile.py": conanfile})
+ client.run("install . ")
+ assert os.path.exists(os.path.join(client.current_folder, "gen", "folder1"))
+ assert not os.path.exists(os.path.join(client.current_folder, "gen", "folder2"))
| [
{
"components": [
{
"doc": "",
"lines": [
64,
65
],
"name": "rmdir",
"signature": "def rmdir(conanfile, path):",
"type": "function"
}
],
"file": "conan/tools/files/files.py"
}
] | [
"conans/test/integration/tools/file_tools_test.py::test_file_tools"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
basic conan.tools.files.rmdir
Changelog: Feature: Added basic `rmdir` tool at `conan.tools.files`.
Docs: https://github.com/conan-io/docs/pull/2470
Close #10711
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/files/files.py]
(definition of rmdir:)
def rmdir(conanfile, path):
[end of new definitions in conan/tools/files/files.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
[feature] Move [rmdir] to conan.tools.files
Currently, it is missing in the new namespace.
----------
--------------------
</issues> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | |
RDFLib__rdflib-1773 | 1,773 | RDFLib/rdflib | null | 8bad917cbc8213e176a47fd37d24f487485dda17 | 2022-03-24T02:05:25Z | diff --git a/rdflib/term.py b/rdflib/term.py
index b53d9287d..2e577f4db 100644
--- a/rdflib/term.py
+++ b/rdflib/term.py
@@ -564,7 +564,8 @@ class Literal(Identifier):
_language: Optional[str]
# NOTE: _datatype should maybe be of type URIRef, and not optional.
_datatype: Optional[str]
- __slots__ = ("_language", "_datatype", "_value")
+ _ill_formed: Optional[bool]
+ __slots__ = ("_language", "_datatype", "_value", "_ill_formed")
def __new__(
cls,
@@ -586,12 +587,13 @@ def __new__(
)
if lang is not None and not _is_valid_langtag(lang):
- raise ValueError("'%s' is not a valid language tag!" % lang)
+ raise ValueError(f"'{str(lang)}' is not a valid language tag!")
if datatype:
datatype = URIRef(datatype)
value = None
+ ill_formed: Optional[bool] = None
if isinstance(lexical_or_value, Literal):
# create from another Literal instance
@@ -607,7 +609,13 @@ def __new__(
# passed a string
# try parsing lexical form of datatyped literal
value = _castLexicalToPython(lexical_or_value, datatype)
-
+ if datatype and datatype in _toPythonMapping:
+ # datatype is a recognized datatype IRI:
+ # https://www.w3.org/TR/rdf11-concepts/#dfn-recognized-datatype-iris
+ dt_uri: URIRef = URIRef(datatype)
+ checker = _check_well_formed_types.get(dt_uri, _well_formed_by_value)
+ well_formed = checker(lexical_or_value, value)
+ ill_formed = ill_formed or (not well_formed)
if value is not None and normalize:
_value, _datatype = _castPythonToLiteral(value, datatype)
if _value is not None and _is_valid_unicode(_value):
@@ -641,6 +649,7 @@ def __new__(
inst._language = lang
inst._datatype = datatype
inst._value = value
+ inst._ill_formed = ill_formed
return inst
@@ -663,6 +672,19 @@ def normalize(self) -> "Literal":
else:
return self
+ @property
+ def ill_formed(self) -> Optional[bool]:
+ """
+ For `recognized datatype IRIs
+ <https://www.w3.org/TR/rdf11-concepts/#dfn-recognized-datatype-iris>`_,
+ this value will be `True` if the literal is ill formed, otherwise it
+ will be `False`. `Literal.value` (i.e. the `literal value <https://www.w3.org/TR/rdf11-concepts/#dfn-literal-value>`_) should always be defined if this property is `False`, but should not be considered reliable if this property is `True`.
+
+ If the literal's datatype is `None` or not in the set of `recognized datatype IRIs
+ <https://www.w3.org/TR/rdf11-concepts/#dfn-recognized-datatype-iris>`_ this value will be `None`.
+ """
+ return self._ill_formed
+
@property
def value(self) -> Any:
return self._value
@@ -1471,9 +1493,14 @@ def _unhexlify(value: Union[str, bytes, Literal]) -> bytes:
return unhexlify(value)
-def _parseBoolean(value: str) -> bool:
- true_accepted_values = ["1", "true"]
- false_accepted_values = ["0", "false"]
+def _parseBoolean(value: Union[str, bytes]) -> bool:
+ """
+ Boolean is a datatype with value space {true,false},
+ lexical space {"true", "false","1","0"} and
+ lexical-to-value mapping {"true"→true, "false"→false, "1"→true, "0"→false}.
+ """
+ true_accepted_values = ["1", "true", b"1", b"true"]
+ false_accepted_values = ["0", "false", b"0", b"false"]
new_value = value.lower()
if new_value in true_accepted_values:
return True
@@ -1485,6 +1512,102 @@ def _parseBoolean(value: str) -> bool:
return False
+def _well_formed_by_value(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ This function is used as the fallback for detecting ill-typed/ill-formed
+ literals and operates on the asumption that if a value (i.e.
+ `Literal.value`) could be determined for a Literal then it is not
+ ill-typed/ill-formed.
+
+ This function will be called with `Literal.lexical` and `Literal.value` as arguments.
+ """
+ return value is not None
+
+
+def _well_formed_unsignedlong(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ xsd:unsignedInteger and xsd:unsignedLong must not be negative
+ """
+ return len(lexical) > 0 and isinstance(value, long_type) and value >= 0
+
+
+def _well_formed_boolean(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ Boolean is a datatype with value space {true,false},
+ lexical space {"true", "false","1","0"} and
+ lexical-to-value mapping {"true"→true, "false"→false, "1"→true, "0"→false}.
+ """
+ return lexical in ("true", b"true", "false", b"false", "1", b"1", "0", b"0")
+
+
+def _well_formed_int(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ The value space of xs:int is the set of common single size integers (32 bits),
+ i.e., the integers between -2147483648 and 2147483647,
+ its lexical space allows any number of insignificant leading zeros.
+ """
+ return (
+ len(lexical) > 0
+ and isinstance(value, int)
+ and (-2147483648 <= value <= 2147483647)
+ )
+
+
+def _well_formed_unsignedint(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ xsd:unsignedInt has a 32bit value of between 0 and 4294967295
+ """
+ return len(lexical) > 0 and isinstance(value, int) and (0 <= value <= 4294967295)
+
+
+def _well_formed_short(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ The value space of xs:short is the set of common short integers (16 bits),
+ i.e., the integers between -32768 and 32767,
+ its lexical space allows any number of insignificant leading zeros.
+ """
+ return len(lexical) > 0 and isinstance(value, int) and (-32768 <= value <= 32767)
+
+
+def _well_formed_unsignedshort(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ xsd:unsignedShort has a 16bit value of between 0 and 65535
+ """
+ return len(lexical) > 0 and isinstance(value, int) and (0 <= value <= 65535)
+
+
+def _well_formed_byte(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ The value space of xs:byte is the set of common single byte integers (8 bits),
+ i.e., the integers between -128 and 127,
+ its lexical space allows any number of insignificant leading zeros.
+ """
+ return len(lexical) > 0 and isinstance(value, int) and (-128 <= value <= 127)
+
+
+def _well_formed_unsignedbyte(lexical: Union[str, bytes], value: Any) -> bool:
+ """
+ xsd:unsignedByte has a 8bit value of between 0 and 255
+ """
+ return len(lexical) > 0 and isinstance(value, int) and (0 <= value <= 255)
+
+
+def _well_formed_non_negative_integer(lexical: Union[str, bytes], value: Any) -> bool:
+ return isinstance(value, int) and value >= 0
+
+
+def _well_formed_positive_integer(lexical: Union[str, bytes], value: Any) -> bool:
+ return isinstance(value, int) and value > 0
+
+
+def _well_formed_non_positive_integer(lexical: Union[str, bytes], value: Any) -> bool:
+ return isinstance(value, int) and value <= 0
+
+
+def _well_formed_negative_integer(lexical: Union[str, bytes], value: Any) -> bool:
+ return isinstance(value, int) and value < 0
+
+
# Cannot import Namespace/XSD because of circular dependencies
_XSD_PFX = "http://www.w3.org/2001/XMLSchema#"
_RDF_PFX = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
@@ -1674,15 +1797,15 @@ def _castPythonToLiteral(
URIRef(_XSD_PFX + "boolean"): _parseBoolean,
URIRef(_XSD_PFX + "decimal"): Decimal,
URIRef(_XSD_PFX + "integer"): long_type,
- URIRef(_XSD_PFX + "nonPositiveInteger"): int,
+ URIRef(_XSD_PFX + "nonPositiveInteger"): long_type,
URIRef(_XSD_PFX + "long"): long_type,
- URIRef(_XSD_PFX + "nonNegativeInteger"): int,
- URIRef(_XSD_PFX + "negativeInteger"): int,
- URIRef(_XSD_PFX + "int"): long_type,
+ URIRef(_XSD_PFX + "nonNegativeInteger"): long_type,
+ URIRef(_XSD_PFX + "negativeInteger"): long_type,
+ URIRef(_XSD_PFX + "int"): int,
URIRef(_XSD_PFX + "unsignedLong"): long_type,
- URIRef(_XSD_PFX + "positiveInteger"): int,
+ URIRef(_XSD_PFX + "positiveInteger"): long_type,
URIRef(_XSD_PFX + "short"): int,
- URIRef(_XSD_PFX + "unsignedInt"): long_type,
+ URIRef(_XSD_PFX + "unsignedInt"): int,
URIRef(_XSD_PFX + "byte"): int,
URIRef(_XSD_PFX + "unsignedShort"): int,
URIRef(_XSD_PFX + "unsignedByte"): int,
@@ -1694,6 +1817,21 @@ def _castPythonToLiteral(
_RDF_HTMLLITERAL: _parseHTML,
}
+_check_well_formed_types: Dict[URIRef, Callable[[Union[str, bytes], Any], bool]] = {
+ URIRef(_XSD_PFX + "boolean"): _well_formed_boolean,
+ URIRef(_XSD_PFX + "nonPositiveInteger"): _well_formed_non_positive_integer,
+ URIRef(_XSD_PFX + "nonNegativeInteger"): _well_formed_non_negative_integer,
+ URIRef(_XSD_PFX + "negativeInteger"): _well_formed_negative_integer,
+ URIRef(_XSD_PFX + "positiveInteger"): _well_formed_positive_integer,
+ URIRef(_XSD_PFX + "int"): _well_formed_int,
+ URIRef(_XSD_PFX + "short"): _well_formed_short,
+ URIRef(_XSD_PFX + "byte"): _well_formed_byte,
+ URIRef(_XSD_PFX + "unsignedInt"): _well_formed_unsignedint,
+ URIRef(_XSD_PFX + "unsignedLong"): _well_formed_unsignedlong,
+ URIRef(_XSD_PFX + "unsignedShort"): _well_formed_unsignedshort,
+ URIRef(_XSD_PFX + "unsignedByte"): _well_formed_unsignedbyte,
+}
+
_toPythonMapping: Dict[Optional[str], Optional[Callable[[str], Any]]] = {}
_toPythonMapping.update(XSDToPython)
| diff --git a/test/test_literal/test_literal.py b/test/test_literal/test_literal.py
index 990e84645..44c3e25c1 100644
--- a/test/test_literal/test_literal.py
+++ b/test/test_literal/test_literal.py
@@ -8,16 +8,19 @@
# mypy: warn_return_any, no_implicit_reexport, strict_equality
from decimal import Decimal
-from typing import Any, Optional, Sequence, Tuple, Type
+from typing import Any, Optional, Sequence, Tuple, Type, Union
import unittest
import datetime
import rdflib # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
from rdflib import XSD
+from rdflib.namespace import RDF, Namespace
import pytest
+EGNS = Namespace("http://example.com/")
+
class TestLiteral(unittest.TestCase):
def setUp(self) -> None:
@@ -80,6 +83,62 @@ def test_cant_pass_invalid_lang(
with pytest.raises(exception_type):
Literal("foo", lang=lang)
+ @pytest.mark.parametrize(
+ "lexical, datatype, is_ill_formed",
+ [
+ ("true", XSD.boolean, False),
+ ("1", XSD.boolean, False),
+ (b"false", XSD.boolean, False),
+ (b"0", XSD.boolean, False),
+ ("yes", XSD.boolean, True),
+ ("200", XSD.byte, True),
+ (b"-128", XSD.byte, False),
+ ("127", XSD.byte, False),
+ ("255", XSD.unsignedByte, False),
+ ("-100", XSD.unsignedByte, True),
+ (b"200", XSD.unsignedByte, False),
+ (b"64300", XSD.short, True),
+ ("-6000", XSD.short, False),
+ ("1000000", XSD.nonNegativeInteger, False),
+ ("-100", XSD.nonNegativeInteger, True),
+ ("a", XSD.double, True),
+ ("0", XSD.double, False),
+ ("0.1", XSD.double, False),
+ ("0.1", XSD.decimal, False),
+ ("0.g", XSD.decimal, True),
+ ("b", XSD.integer, True),
+ ("2147483647", XSD.int, False),
+ ("2147483648", XSD.int, True),
+ ("2147483648", XSD.integer, False),
+ ("valid ASCII", XSD.string, False),
+ pytest.param("هذا رجل ثلج⛄", XSD.string, False, id="snowman-ar"),
+ ("More ASCII", None, None),
+ ("Not a valid time", XSD.time, True),
+ ("Not a valid date", XSD.date, True),
+ ("7264666c6962", XSD.hexBinary, False),
+
+ # RDF.langString is not a recognized datatype IRI as we assign no literal value to it, though this should likely change.
+ ("English string", RDF.langString, None),
+
+ # The datatypes IRIs below should never be recognized.
+ ("[p]", EGNS.unrecognized, None),
+ ],
+ )
+ def test_ill_formed_literals(
+ self,
+ lexical: Union[bytes, str],
+ datatype: Optional[URIRef],
+ is_ill_formed: Optional[bool],
+ ) -> None:
+ """
+ ill_formed has the correct value.
+ """
+ lit = Literal(lexical, datatype=datatype)
+ assert lit.ill_formed is is_ill_formed
+ if is_ill_formed is False:
+ # If the literal is not ill formed it should have a value associated with it.
+ assert lit.value is not None
+
class TestNew(unittest.TestCase):
# NOTE: Please use TestNewPT for new tests instead of this which is written
| [
{
"components": [
{
"doc": "For `recognized datatype IRIs\n<https://www.w3.org/TR/rdf11-concepts/#dfn-recognized-datatype-iris>`_,\nthis value will be `True` if the literal is ill formed, otherwise it\nwill be `False`. `Literal.value` (i.e. the `literal value <https://www.w3.org/TR/rdf11-concepts/... | [
"test/test_literal/test_literal.py::TestNewPT::test_ill_formed_literals[true-http://www.w3.org/2001/XMLSchema#boolean-False]",
"test/test_literal/test_literal.py::TestNewPT::test_ill_formed_literals[1-http://www.w3.org/2001/XMLSchema#boolean-False]",
"test/test_literal/test_literal.py::TestNewPT::test_ill_forme... | [
"test/test_literal/test_literal.py::TestLiteral::test_backslash",
"test/test_literal/test_literal.py::TestLiteral::test_literal_from_bool",
"test/test_literal/test_literal.py::TestLiteral::test_repr_apostrophe",
"test/test_literal/test_literal.py::TestLiteral::test_repr_quote",
"test/test_literal/test_liter... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add ability to detect and mark ill-typed literals
Add ability to mark ill-typed literals where the lexical value does not match the datatype given if known
Parameterized test is included to test feature
Fixes #1757
Fixes #848
Related to, but doesn't quite relate to #737
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rdflib/term.py]
(definition of Literal.ill_formed:)
def ill_formed(self) -> Optional[bool]:
"""For `recognized datatype IRIs
<https://www.w3.org/TR/rdf11-concepts/#dfn-recognized-datatype-iris>`_,
this value will be `True` if the literal is ill formed, otherwise it
will be `False`. `Literal.value` (i.e. the `literal value <https://www.w3.org/TR/rdf11-concepts/#dfn-literal-value>`_) should always be defined if this property is `False`, but should not be considered reliable if this property is `True`.
If the literal's datatype is `None` or not in the set of `recognized datatype IRIs
<https://www.w3.org/TR/rdf11-concepts/#dfn-recognized-datatype-iris>`_ this value will be `None`."""
(definition of _well_formed_by_value:)
def _well_formed_by_value(lexical: Union[str, bytes], value: Any) -> bool:
"""This function is used as the fallback for detecting ill-typed/ill-formed
literals and operates on the asumption that if a value (i.e.
`Literal.value`) could be determined for a Literal then it is not
ill-typed/ill-formed.
This function will be called with `Literal.lexical` and `Literal.value` as arguments."""
(definition of _well_formed_unsignedlong:)
def _well_formed_unsignedlong(lexical: Union[str, bytes], value: Any) -> bool:
"""xsd:unsignedInteger and xsd:unsignedLong must not be negative"""
(definition of _well_formed_boolean:)
def _well_formed_boolean(lexical: Union[str, bytes], value: Any) -> bool:
"""Boolean is a datatype with value space {true,false},
lexical space {"true", "false","1","0"} and
lexical-to-value mapping {"true"→true, "false"→false, "1"→true, "0"→false}."""
(definition of _well_formed_int:)
def _well_formed_int(lexical: Union[str, bytes], value: Any) -> bool:
"""The value space of xs:int is the set of common single size integers (32 bits),
i.e., the integers between -2147483648 and 2147483647,
its lexical space allows any number of insignificant leading zeros."""
(definition of _well_formed_unsignedint:)
def _well_formed_unsignedint(lexical: Union[str, bytes], value: Any) -> bool:
"""xsd:unsignedInt has a 32bit value of between 0 and 4294967295"""
(definition of _well_formed_short:)
def _well_formed_short(lexical: Union[str, bytes], value: Any) -> bool:
"""The value space of xs:short is the set of common short integers (16 bits),
i.e., the integers between -32768 and 32767,
its lexical space allows any number of insignificant leading zeros."""
(definition of _well_formed_unsignedshort:)
def _well_formed_unsignedshort(lexical: Union[str, bytes], value: Any) -> bool:
"""xsd:unsignedShort has a 16bit value of between 0 and 65535"""
(definition of _well_formed_byte:)
def _well_formed_byte(lexical: Union[str, bytes], value: Any) -> bool:
"""The value space of xs:byte is the set of common single byte integers (8 bits),
i.e., the integers between -128 and 127,
its lexical space allows any number of insignificant leading zeros."""
(definition of _well_formed_unsignedbyte:)
def _well_formed_unsignedbyte(lexical: Union[str, bytes], value: Any) -> bool:
"""xsd:unsignedByte has a 8bit value of between 0 and 255"""
(definition of _well_formed_non_negative_integer:)
def _well_formed_non_negative_integer(lexical: Union[str, bytes], value: Any) -> bool:
(definition of _well_formed_positive_integer:)
def _well_formed_positive_integer(lexical: Union[str, bytes], value: Any) -> bool:
(definition of _well_formed_non_positive_integer:)
def _well_formed_non_positive_integer(lexical: Union[str, bytes], value: Any) -> bool:
(definition of _well_formed_negative_integer:)
def _well_formed_negative_integer(lexical: Union[str, bytes], value: Any) -> bool:
[end of new definitions in rdflib/term.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
rdflib.Literals need a well-formed / ill-formed status flag
I encountered this issue while working on [pySHACL](https://github.com/RDFLib/pySHACL).
Specifically, this bug is causing a failure in several of the tests in the standard data-shapes-test-suite here [or-datatypes-001.ttl](https://github.com/w3c/data-shapes/blob/gh-pages/data-shapes-test-suite/tests/core/property/or-datatypes-001.ttl) and [datatype-ill-formed.ttl](https://github.com/w3c/data-shapes/blob/gh-pages/data-shapes-test-suite/tests/core/property/datatype-ill-formed.ttl)
This test relies on the assertion that literals such as `"none"^^xsd:boolean` and `"300"^^xsd:byte` should be considered by the validator to be ill-formed literals, so that when checking rules such as `sh:datatype`, if validating that this property value is a well-formed Literal of type `xsd:boolean` or `xsd:byte` (respectively) this should Fail.
Currently, `"none"^^xsd:boolean` is parsed to a Literal with `value=False, datatype=xsd:boolean` and `"300"^^xsd:byte` is parsed to a Literal with `value=int(300) and datatype=xsd:byte`, so the validation checks which should fail actually pass.
An ideal solution would be at Literal-creation time _before_ converting the lexical value to a Python value, check if it is ill-formed first, store that as an `ill_formed` flag on the Literal itself, then do the conversion as normal.
----------
This is potentially related to issue: https://github.com/RDFLib/rdflib/issues/737
--------------------
</issues> | 0c11debb5178157baeac27b735e49a757916d2a6 | |
conan-io__conan-10868 | 10,868 | conan-io/conan | null | f69c0269f9b7b5842f12de2ae444212b9a80a896 | 2022-03-23T23:22:36Z | diff --git a/conans/client/graph/compatibility.py b/conans/client/graph/compatibility.py
new file mode 100644
index 00000000000..d4fee651ce3
--- /dev/null
+++ b/conans/client/graph/compatibility.py
@@ -0,0 +1,32 @@
+from collections import OrderedDict
+
+from conans.errors import conanfile_exception_formatter
+
+
+class BinaryCompatibility:
+ def __init__(self, cache):
+ pass
+
+ def compatibles(self, conanfile):
+ compat_infos = []
+ if hasattr(conanfile, "compatibility") and callable(conanfile.compatibility):
+ with conanfile_exception_formatter(conanfile, "compatibility"):
+ recipe_compatibles = conanfile.compatibility()
+ compat_infos.extend(self._compatible_infos(conanfile, recipe_compatibles))
+
+ conanfile.compatible_packages.extend(compat_infos)
+
+ @staticmethod
+ def _compatible_infos(conanfile, compatibles):
+ result = []
+ if compatibles:
+ for elem in compatibles:
+ compat_info = conanfile.original_info.clone()
+ settings = elem.get("settings")
+ if settings:
+ compat_info.settings.update_values(settings)
+ options = elem.get("options")
+ if options:
+ compat_info.options.update(options_values=OrderedDict(options))
+ result.append(compat_info)
+ return result
diff --git a/conans/client/graph/graph_binaries.py b/conans/client/graph/graph_binaries.py
index b8f8bd1dfed..103832b2a03 100644
--- a/conans/client/graph/graph_binaries.py
+++ b/conans/client/graph/graph_binaries.py
@@ -1,4 +1,5 @@
from conans.client.graph.build_mode import BuildMode
+from conans.client.graph.compatibility import BinaryCompatibility
from conans.client.graph.graph import (BINARY_BUILD, BINARY_CACHE, BINARY_DOWNLOAD, BINARY_MISSING,
BINARY_UPDATE, RECIPE_EDITABLE, BINARY_EDITABLE,
RECIPE_CONSUMER, RECIPE_VIRTUAL, BINARY_SKIP, BINARY_UNKNOWN,
@@ -20,6 +21,7 @@ def __init__(self, cache, output, remote_manager):
# These are the nodes with pref (not including PREV) that have been evaluated
self._evaluated = {} # {pref: [nodes]}
self._fixed_package_id = cache.config.full_transitive_package_id
+ self._compatibility = BinaryCompatibility(self._cache)
@staticmethod
def _check_update(upstream_manifest, package_folder, output):
@@ -199,6 +201,8 @@ def _evaluate_node(self, node, build_mode, update, remotes):
pref = PackageReference(node.ref, node.package_id)
self._process_node(node, pref, build_mode, update, remotes)
if node.binary in (BINARY_MISSING, BINARY_INVALID):
+ conanfile = node.conanfile
+ self._compatibility.compatibles(conanfile)
if node.conanfile.compatible_packages:
compatible_build_mode = BuildMode(None, self._out)
for compatible_package in node.conanfile.compatible_packages:
@@ -371,7 +375,7 @@ def _compute_package_id(self, node, default_package_id_mode, default_python_requ
python_requires=python_requires,
default_python_requires_id_mode=
default_python_requires_id_mode)
-
+ conanfile.original_info = conanfile.info.clone()
if not self._cache.new_config["core.package_id:msvc_visual_incompatible"]:
msvc_compatible = conanfile.info.msvc_compatible()
if msvc_compatible:
diff --git a/conans/model/values.py b/conans/model/values.py
index 032a6fbfacb..8fa33531293 100644
--- a/conans/model/values.py
+++ b/conans/model/values.py
@@ -67,6 +67,18 @@ def loads(cls, text):
result.append((name.strip(), value.strip()))
return cls.from_list(result)
+ def update_values(self, values):
+ """ receives a list of tuples (compiler.version, value)
+ Necessary for binary_compatibility.py
+ """
+ assert isinstance(values, (list, tuple)), values
+ for (name, value) in values:
+ list_settings = name.split(".")
+ attr = self
+ for setting in list_settings[:-1]:
+ attr = getattr(attr, setting)
+ setattr(attr, list_settings[-1], value)
+
def as_list(self, list_all=True):
result = []
for field in self.fields:
| diff --git a/conans/test/integration/package_id/compatible_test.py b/conans/test/integration/package_id/compatible_test.py
index 0ac96707e89..5760dc6b1c4 100644
--- a/conans/test/integration/package_id/compatible_test.py
+++ b/conans/test/integration/package_id/compatible_test.py
@@ -604,3 +604,45 @@ def test_apple_clang_compatible():
'-s build_type=Release -s arch=x86_64')
client.run("install pkg/0.1@ -pr=profile")
assert "Using compatible package" in client.out
+
+
+class TestNewCompatibility:
+
+ def test_compatible_setting(self):
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ settings = "os", "compiler"
+
+ def compatibility(self):
+ if self.settings.compiler == "gcc" and self.settings.compiler.version == "4.9":
+ return [{"settings": [("compiler.version", v)]}
+ for v in ("4.8", "4.7", "4.6")]
+
+ def package_info(self):
+ self.output.info("PackageInfo!: Gcc version: %s!"
+ % self.settings.compiler.version)
+ """)
+ profile = textwrap.dedent("""
+ [settings]
+ os = Linux
+ compiler=gcc
+ compiler.version=4.9
+ compiler.libcxx=libstdc++
+ """)
+ c.save({"conanfile.py": conanfile,
+ "myprofile": profile})
+ # Create package with gcc 4.8
+ c.run("create . -pr=myprofile -s compiler.version=4.8")
+ assert "pkg/0.1: Package '22c594d7fed4994c59a1eacb24ff6ff48bc5c51c' created" in c.out
+
+ # package can be used with a profile gcc 4.9 falling back to 4.8 binary
+ c.save({"conanfile.py": GenConanfile().with_require("pkg/0.1")})
+ c.run("install . -pr=myprofile")
+ assert "pkg/0.1: PackageInfo!: Gcc version: 4.8!" in c.out
+ assert "pkg/0.1:22c594d7fed4994c59a1eacb24ff6ff48bc5c51c" in c.out
+ assert "pkg/0.1: Already installed!" in c.out
| [
{
"components": [
{
"doc": "",
"lines": [
6,
32
],
"name": "BinaryCompatibility",
"signature": "class BinaryCompatibility:",
"type": "class"
},
{
"doc": "",
"lines": [
7,
8
],
... | [
"conans/test/integration/package_id/compatible_test.py::TestNewCompatibility::test_compatible_setting"
] | [
"conans/test/integration/package_id/compatible_test.py::CompatibleIDsTest::test_additional_id_mode",
"conans/test/integration/package_id/compatible_test.py::CompatibleIDsTest::test_build_missing",
"conans/test/integration/package_id/compatible_test.py::CompatibleIDsTest::test_compatible_diamond",
"conans/test... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
backport of compatibility() recipe method
Changelog: Feature: Backport of 2.0 compatibility() recipe method.
Docs: omit
Backport of https://github.com/conan-io/conan/pull/10241
I would probably wait 1 release to document or make this public, as the interface might still change
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/client/graph/compatibility.py]
(definition of BinaryCompatibility:)
class BinaryCompatibility:
(definition of BinaryCompatibility.__init__:)
def __init__(self, cache):
(definition of BinaryCompatibility.compatibles:)
def compatibles(self, conanfile):
(definition of BinaryCompatibility._compatible_infos:)
def _compatible_infos(conanfile, compatibles):
[end of new definitions in conans/client/graph/compatibility.py]
[start of new definitions in conans/model/values.py]
(definition of Values.update_values:)
def update_values(self, values):
"""receives a list of tuples (compiler.version, value)
Necessary for binary_compatibility.py"""
[end of new definitions in conans/model/values.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-10867 | 10,867 | conan-io/conan | null | dc92057aa65c15834451e226577465c2f263e122 | 2022-03-23T22:26:38Z | diff --git a/conan/tools/files/files.py b/conan/tools/files/files.py
index 941c7764969..90a7ca3b78a 100644
--- a/conan/tools/files/files.py
+++ b/conan/tools/files/files.py
@@ -126,7 +126,7 @@ def download(conanfile, url, filename, verify=True, retry=None, retry_wait=None,
:return: None
"""
# TODO: Add all parameters to the new conf
- requester = conanfile._conan_requester
+ requester = conanfile._conan_helpers.requester
config = conanfile.conf
out = ConanOutput()
overwrite = True
diff --git a/conans/cli/conan_app.py b/conans/cli/conan_app.py
index 78babe20102..c2312a09947 100644
--- a/conans/cli/conan_app.py
+++ b/conans/cli/conan_app.py
@@ -1,9 +1,11 @@
+import os
+
from conans.client.cache.cache import ClientCache
from conans.client.graph.proxy import ConanProxy
from conans.client.graph.python_requires import PyRequireLoader
from conans.client.graph.range_resolver import RangeResolver
from conans.client.hook_manager import HookManager
-from conans.client.loader import ConanFileLoader
+from conans.client.loader import ConanFileLoader, load_python_file
from conans.client.remote_manager import RemoteManager
from conans.client.rest.auth_manager import ConanApiAuthManager
from conans.client.rest.conan_requester import ConanRequester
@@ -11,6 +13,27 @@
from conans.errors import ConanException
+class CmdWrapper:
+ def __init__(self, cache):
+ wrapper = os.path.join(cache.cache_folder, "extensions", "plugins", "cmd_wrapper.py")
+ if os.path.isfile(wrapper):
+ mod, _ = load_python_file(wrapper)
+ self._wrapper = mod.cmd_wrapper
+ else:
+ self._wrapper = None
+
+ def wrap(self, cmd):
+ if self._wrapper is None:
+ return cmd
+ return self._wrapper(cmd)
+
+
+class ConanFileHelpers:
+ def __init__(self, requester, cmd_wrapper):
+ self.requester = requester
+ self.cmd_wrapper = cmd_wrapper
+
+
class ConanApp(object):
def __init__(self, cache_folder):
@@ -33,7 +56,9 @@ def __init__(self, cache_folder):
self.range_resolver = RangeResolver(self)
self.pyreq_loader = PyRequireLoader(self.proxy, self.range_resolver)
- self.loader = ConanFileLoader(self.pyreq_loader, self.requester)
+ cmd_wrap = CmdWrapper(self.cache)
+ conanfile_helpers = ConanFileHelpers(self.requester, cmd_wrap)
+ self.loader = ConanFileLoader(self.pyreq_loader, conanfile_helpers)
# Remotes
self.selected_remotes = []
diff --git a/conans/client/loader.py b/conans/client/loader.py
index fa6c87a9947..fe2731ec4f0 100644
--- a/conans/client/loader.py
+++ b/conans/client/loader.py
@@ -18,10 +18,10 @@
class ConanFileLoader:
- def __init__(self, pyreq_loader=None, requester=None):
+ def __init__(self, pyreq_loader=None, conanfile_helpers=None):
self._pyreq_loader = pyreq_loader
self._cached_conanfile_classes = {}
- self._requester = requester
+ self._conanfile_helpers = conanfile_helpers
invalidate_caches()
def load_basic(self, conanfile_path, graph_lock=None, display=""):
@@ -35,7 +35,7 @@ def load_basic_module(self, conanfile_path, graph_lock=None, display=""):
cached = self._cached_conanfile_classes.get(conanfile_path)
if cached:
conanfile = cached[0](display)
- conanfile._conan_requester = self._requester
+ conanfile._conan_helpers = self._conanfile_helpers
if hasattr(conanfile, "init") and callable(conanfile.init):
with conanfile_exception_formatter(conanfile, "init"):
conanfile.init()
@@ -56,7 +56,7 @@ def load_basic_module(self, conanfile_path, graph_lock=None, display=""):
self._cached_conanfile_classes[conanfile_path] = (conanfile, module)
result = conanfile(display)
- result._conan_requester = self._requester
+ result._conan_helpers = self._conanfile_helpers
if hasattr(result, "init") and callable(result.init):
with conanfile_exception_formatter(result, "init"):
result.init()
diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py
index 3c9937bd572..0351c9c3a77 100644
--- a/conans/model/conan_file.py
+++ b/conans/model/conan_file.py
@@ -54,7 +54,7 @@ def __init__(self, display_name=""):
# something that can run commands, as os.sytem
self.compatible_packages = []
- self._conan_requester = None
+ self._conan_helpers = None
from conan.tools.env import Environment
self.buildenv_info = Environment()
self.runenv_info = Environment()
@@ -217,6 +217,7 @@ def package_info(self):
def run(self, command, stdout=None, cwd=None, ignore_errors=False, env=None, quiet=False,
shell=True):
# NOTE: "self.win_bash" is the new parameter "win_bash" for Conan 2.0
+ command = self._conan_helpers.cmd_wrapper.wrap(command)
if platform.system() == "Windows":
if self.win_bash: # New, Conan 2.0
from conans.client.subsystems import run_in_windows_bash
| diff --git a/conans/test/integration/test_plugin_cmd_wrapper.py b/conans/test/integration/test_plugin_cmd_wrapper.py
new file mode 100644
index 00000000000..3912afad037
--- /dev/null
+++ b/conans/test/integration/test_plugin_cmd_wrapper.py
@@ -0,0 +1,27 @@
+import os
+import textwrap
+
+from conans.test.utils.tools import TestClient
+from conans.util.files import save
+
+
+def test_plugin_cmd_wrapper():
+ c = TestClient()
+ plugins = os.path.join(c.cache.cache_folder, "extensions", "plugins")
+ wrapper = textwrap.dedent("""
+ def cmd_wrapper(cmd):
+ return 'echo "{}"'.format(cmd)
+ """)
+ # TODO: Decide name
+ save(os.path.join(plugins, "cmd_wrapper.py"), wrapper)
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ class Pkg(ConanFile):
+ def generate(self):
+ self.run("Hello world")
+ self.run("Other stuff")
+ """)
+ c.save({"conanfile.py": conanfile})
+ c.run("install .")
+ assert 'Hello world' in c.out
+ assert 'Other stuff' in c.out
diff --git a/conans/test/unittests/tools/files/test_downloads.py b/conans/test/unittests/tools/files/test_downloads.py
index 220dd4b0d1e..342963abffa 100644
--- a/conans/test/unittests/tools/files/test_downloads.py
+++ b/conans/test/unittests/tools/files/test_downloads.py
@@ -79,7 +79,7 @@ class TestDownload:
def test_download(self, bottle_server):
dest = os.path.join(temp_folder(), "manual.html")
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
download(conanfile, "http://localhost:%s/manual.html" % bottle_server.port, dest, retry=3,
retry_wait=0)
content = load(dest)
@@ -92,7 +92,7 @@ def test_download(self, bottle_server):
def test_download_iterate_url(self, bottle_server):
dest = os.path.join(temp_folder(), "manual.html")
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
output = RedirectedTestOutput()
with redirect_output(output):
download(conanfile, ["invalid",
@@ -107,7 +107,7 @@ def test_download_forbidden(self, bottle_server):
# Not authorized
with pytest.raises(AuthenticationException) as exc:
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
download(conanfile, "http://localhost:%s/forbidden" % bottle_server.port, dest)
assert "403: Forbidden" in str(exc.value)
@@ -115,7 +115,7 @@ def test_download_authorized(self, bottle_server):
# Not authorized
dest = os.path.join(temp_folder(), "manual.html")
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
with pytest.raises(AuthenticationException):
download(conanfile, "http://localhost:%s/basic-auth/user/passwd" % bottle_server.port,
dest, retry=0, retry_wait=0)
@@ -131,7 +131,7 @@ def test_download_authorized(self, bottle_server):
def test_download_retries_errors(self):
# unreachable server will retry
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
file_path = os.path.join(temp_folder(), "file.txt")
with pytest.raises(ConanException):
output = RedirectedTestOutput()
@@ -142,7 +142,7 @@ def test_download_retries_errors(self):
def test_download_retries_500_errors(self, bottle_server):
# 500 internal also retries
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
file_path = os.path.join(temp_folder(), "file.txt")
with pytest.raises(ConanException):
output = RedirectedTestOutput()
@@ -154,7 +154,7 @@ def test_download_retries_500_errors(self, bottle_server):
def test_download_no_retries_errors(self, bottle_server):
# Not found error will not retry
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
file_path = os.path.join(temp_folder(), "file.txt")
with pytest.raises(ConanException):
download(conanfile, "http://localhost:%s/notexisting" % bottle_server.port, file_path,
@@ -211,7 +211,7 @@ class TestGet:
def test_get_tgz(self, bottle_server_zip):
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
tmp_folder = temp_folder()
with chdir(tmp_folder):
get(conanfile, "http://localhost:%s/sample.tgz" % bottle_server_zip.port,
@@ -220,7 +220,7 @@ def test_get_tgz(self, bottle_server_zip):
def test_get_tgz_strip_root(self, bottle_server_zip):
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
tmp_folder = temp_folder()
with chdir(tmp_folder):
get(conanfile, "http://localhost:%s/sample.tgz" % bottle_server_zip.port,
@@ -229,7 +229,7 @@ def test_get_tgz_strip_root(self, bottle_server_zip):
def test_get_gunzip(self, bottle_server_zip):
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
tmp_folder = temp_folder()
with chdir(tmp_folder):
get(conanfile, "http://localhost:%s/test.txt.gz" % bottle_server_zip.port,
@@ -238,7 +238,7 @@ def test_get_gunzip(self, bottle_server_zip):
def test_get_gunzip_destination(self, bottle_server_zip):
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
tmp_folder = temp_folder()
with chdir(tmp_folder):
get(conanfile, "http://localhost:%s/test.txt.gz" % bottle_server_zip.port,
@@ -247,7 +247,7 @@ def test_get_gunzip_destination(self, bottle_server_zip):
def test_get_gunzip_destination_subfolder(self, bottle_server_zip):
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
tmp_folder = temp_folder()
with chdir(tmp_folder):
get(conanfile, "http://localhost:%s/test.txt.gz" % bottle_server_zip.port,
@@ -257,7 +257,7 @@ def test_get_gunzip_destination_subfolder(self, bottle_server_zip):
def test_get_filename_error(self, bottle_server_zip):
# Test: File name cannot be deduced from '?file=1'
conanfile = ConanFileMock()
- conanfile._conan_requester = requests
+ conanfile._conan_helpers.requester = requests
with pytest.raises(ConanException) as error:
get(conanfile, "http://localhost:%s/?file=1" % bottle_server_zip.port)
assert "Cannot deduce file name from the url" in str(error.value)
diff --git a/conans/test/utils/mocks.py b/conans/test/utils/mocks.py
index 22f5b6f287d..34c828fe7f0 100644
--- a/conans/test/utils/mocks.py
+++ b/conans/test/utils/mocks.py
@@ -3,6 +3,7 @@
from io import StringIO
from conan import ConanFile
+from conans.cli.conan_app import ConanFileHelpers
from conans.model.conf import ConfDefinition, Conf
from conans.model.layout import Folders, Infos
from conans.model.options import Options
@@ -110,6 +111,7 @@ def __init__(self, shared=None, ):
self.env_scripts = {}
self.win_bash = None
self.conf = ConfDefinition().get_conanfile_conf(None)
+ self._conan_helpers = ConanFileHelpers(None, None)
def run(self, command, win_bash=False, subsystem=None, env=None, ignore_errors=False):
assert win_bash is False
| [
{
"components": [
{
"doc": "",
"lines": [
16,
28
],
"name": "CmdWrapper",
"signature": "class CmdWrapper:",
"type": "class"
},
{
"doc": "",
"lines": [
17,
23
],
"name": "CmdW... | [
"conans/test/integration/test_plugin_cmd_wrapper.py::test_plugin_cmd_wrapper",
"conans/test/unittests/tools/files/test_downloads.py::TestDownload::test_download",
"conans/test/unittests/tools/files/test_downloads.py::TestDownload::test_download_iterate_url",
"conans/test/unittests/tools/files/test_downloads.p... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[develop2] new cmd_wrapper plugin
Close https://github.com/conan-io/conan/issues/10426
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/cli/conan_app.py]
(definition of CmdWrapper:)
class CmdWrapper:
(definition of CmdWrapper.__init__:)
def __init__(self, cache):
(definition of CmdWrapper.wrap:)
def wrap(self, cmd):
(definition of ConanFileHelpers:)
class ConanFileHelpers:
(definition of ConanFileHelpers.__init__:)
def __init__(self, requester, cmd_wrapper):
[end of new definitions in conans/cli/conan_app.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
pydata__xarray-6400 | 6,400 | pydata/xarray | 2022.03 | 728b648d5c7c3e22fe3704ba163012840408bf66 | 2022-03-22T12:57:37Z | diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 39aaf8e2954..e01bdf93b00 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -32,6 +32,10 @@ New Features
- Multi-index levels are now accessible through their own, regular coordinates
instead of virtual coordinates (:pull:`5692`).
By `Benoît Bovy <https://github.com/benbovy>`_.
+- Add a ``display_values_threshold`` option to control the total number of array
+ elements which trigger summarization rather than full repr in (numpy) array
+ detailed views of the html repr (:pull:`6400`).
+ By `Benoît Bovy <https://github.com/benbovy>`_.
Breaking changes
~~~~~~~~~~~~~~~~
@@ -60,6 +64,8 @@ Bug fixes
- Fixed "unhashable type" error trying to read NetCDF file with variable having its 'units'
attribute not ``str`` (e.g. ``numpy.ndarray``) (:issue:`6368`).
By `Oleh Khoma <https://github.com/okhoma>`_.
+- Fixed the poor html repr performance on large multi-indexes (:pull:`6400`).
+ By `Benoît Bovy <https://github.com/benbovy>`_.
- Allow fancy indexing of duck dask arrays along multiple dimensions. (:pull:`6414`)
By `Justus Magin <https://github.com/keewis>`_.
diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py
index 81617ae38f9..e372e3bdd40 100644
--- a/xarray/core/formatting.py
+++ b/xarray/core/formatting.py
@@ -520,7 +520,11 @@ def short_numpy_repr(array):
# default to lower precision so a full (abbreviated) line can fit on
# one line with the default display_width
- options = {"precision": 6, "linewidth": OPTIONS["display_width"], "threshold": 200}
+ options = {
+ "precision": 6,
+ "linewidth": OPTIONS["display_width"],
+ "threshold": OPTIONS["display_values_threshold"],
+ }
if array.ndim < 3:
edgeitems = 3
elif array.ndim == 3:
diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py
index c8851788c29..27bd4954bc4 100644
--- a/xarray/core/indexing.py
+++ b/xarray/core/indexing.py
@@ -5,6 +5,7 @@
from contextlib import suppress
from dataclasses import dataclass, field
from datetime import timedelta
+from html import escape
from typing import (
TYPE_CHECKING,
Any,
@@ -25,6 +26,7 @@
from . import duck_array_ops, nputils, utils
from .npcompat import DTypeLike
+from .options import OPTIONS
from .pycompat import dask_version, integer_types, is_duck_dask_array, sparse_array_type
from .types import T_Xarray
from .utils import either_dict_or_kwargs, get_valid_numpy_dtype
@@ -1507,23 +1509,31 @@ def __repr__(self) -> str:
)
return f"{type(self).__name__}{props}"
- def _repr_inline_(self, max_width) -> str:
- # special implementation to speed-up the repr for big multi-indexes
+ def _get_array_subset(self) -> np.ndarray:
+ # used to speed-up the repr for big multi-indexes
+ threshold = max(100, OPTIONS["display_values_threshold"] + 2)
+ if self.size > threshold:
+ pos = threshold // 2
+ indices = np.concatenate([np.arange(0, pos), np.arange(-pos, 0)])
+ subset = self[OuterIndexer((indices,))]
+ else:
+ subset = self
+
+ return np.asarray(subset)
+
+ def _repr_inline_(self, max_width: int) -> str:
+ from .formatting import format_array_flat
+
if self.level is None:
return "MultiIndex"
else:
- from .formatting import format_array_flat
+ return format_array_flat(self._get_array_subset(), max_width)
- if self.size > 100 and max_width < self.size:
- n_values = max_width
- indices = np.concatenate(
- [np.arange(0, n_values), np.arange(-n_values, 0)]
- )
- subset = self[OuterIndexer((indices,))]
- else:
- subset = self
+ def _repr_html_(self) -> str:
+ from .formatting import short_numpy_repr
- return format_array_flat(np.asarray(subset), max_width)
+ array_repr = short_numpy_repr(self._get_array_subset())
+ return f"<pre>{escape(array_repr)}</pre>"
def copy(self, deep: bool = True) -> "PandasMultiIndexingAdapter":
# see PandasIndexingAdapter.copy
diff --git a/xarray/core/options.py b/xarray/core/options.py
index 0c45e126fe6..399afe90b66 100644
--- a/xarray/core/options.py
+++ b/xarray/core/options.py
@@ -15,6 +15,7 @@ class T_Options(TypedDict):
cmap_divergent: Union[str, "Colormap"]
cmap_sequential: Union[str, "Colormap"]
display_max_rows: int
+ display_values_threshold: int
display_style: Literal["text", "html"]
display_width: int
display_expand_attrs: Literal["default", True, False]
@@ -33,6 +34,7 @@ class T_Options(TypedDict):
"cmap_divergent": "RdBu_r",
"cmap_sequential": "viridis",
"display_max_rows": 12,
+ "display_values_threshold": 200,
"display_style": "html",
"display_width": 80,
"display_expand_attrs": "default",
@@ -57,6 +59,7 @@ def _positive_integer(value):
_VALIDATORS = {
"arithmetic_join": _JOIN_OPTIONS.__contains__,
"display_max_rows": _positive_integer,
+ "display_values_threshold": _positive_integer,
"display_style": _DISPLAY_OPTIONS.__contains__,
"display_width": _positive_integer,
"display_expand_attrs": lambda choice: choice in [True, False, "default"],
@@ -154,6 +157,9 @@ class set_options:
* ``default`` : to expand unless over a pre-defined limit
display_max_rows : int, default: 12
Maximum display rows.
+ display_values_threshold : int, default: 200
+ Total number of array elements which trigger summarization rather
+ than full repr for variable data views (numpy arrays).
display_style : {"text", "html"}, default: "html"
Display style to use in jupyter for xarray objects.
display_width : int, default: 80
| diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index 105cec7e850..efdb8a57288 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -479,6 +479,12 @@ def test_short_numpy_repr() -> None:
num_lines = formatting.short_numpy_repr(array).count("\n") + 1
assert num_lines < 30
+ # threshold option (default: 200)
+ array = np.arange(100)
+ assert "..." not in formatting.short_numpy_repr(array)
+ with xr.set_options(display_values_threshold=10):
+ assert "..." in formatting.short_numpy_repr(array)
+
def test_large_array_repr_length() -> None:
| diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 39aaf8e2954..e01bdf93b00 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -32,6 +32,10 @@ New Features
- Multi-index levels are now accessible through their own, regular coordinates
instead of virtual coordinates (:pull:`5692`).
By `Benoît Bovy <https://github.com/benbovy>`_.
+- Add a ``display_values_threshold`` option to control the total number of array
+ elements which trigger summarization rather than full repr in (numpy) array
+ detailed views of the html repr (:pull:`6400`).
+ By `Benoît Bovy <https://github.com/benbovy>`_.
Breaking changes
~~~~~~~~~~~~~~~~
@@ -60,6 +64,8 @@ Bug fixes
- Fixed "unhashable type" error trying to read NetCDF file with variable having its 'units'
attribute not ``str`` (e.g. ``numpy.ndarray``) (:issue:`6368`).
By `Oleh Khoma <https://github.com/okhoma>`_.
+- Fixed the poor html repr performance on large multi-indexes (:pull:`6400`).
+ By `Benoît Bovy <https://github.com/benbovy>`_.
- Allow fancy indexing of duck dask arrays along multiple dimensions. (:pull:`6414`)
By `Justus Magin <https://github.com/keewis>`_.
| [
{
"components": [
{
"doc": "",
"lines": [
1512,
1522
],
"name": "PandasMultiIndexingAdapter._get_array_subset",
"signature": "def _get_array_subset(self) -> np.ndarray:",
"type": "function"
},
{
"doc": "",
"lin... | [
"xarray/tests/test_formatting.py::test_short_numpy_repr"
] | [
"xarray/tests/test_formatting.py::TestFormatting::test_get_indexer_at_least_n_items",
"xarray/tests/test_formatting.py::TestFormatting::test_first_n_items",
"xarray/tests/test_formatting.py::TestFormatting::test_last_n_items",
"xarray/tests/test_formatting.py::TestFormatting::test_last_item",
"xarray/tests/... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Speed-up multi-index html repr + add display_values_threshold option
This adds `PandasMultiIndexingAdapter._repr_html_` that can greatly speed-up the html repr of Xarray objects with
multi-indexes.
This optimized `_repr_html_` implementation is now used for formatting the array detailed view of all multi-index coordinates in the html repr, instead of converting the full index and each levels to numpy arrays before formatting them.
```python
import xarray as xr
ds = xr.tutorial.load_dataset("air_temperature")
da = ds["air"].stack(z=[...])
da.shape
# (3869000,)
%timeit -n 1 -r 1 da._repr_html_()
# 9.96 ms !
```
<!-- Feel free to remove check-list items aren't relevant to your change -->
- [x] Closes #5529
- [x] User visible changes (including notable bug fixes) are documented in `whats-new.rst`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in xarray/core/indexing.py]
(definition of PandasMultiIndexingAdapter._get_array_subset:)
def _get_array_subset(self) -> np.ndarray:
(definition of PandasMultiIndexingAdapter._repr_html_:)
def _repr_html_(self) -> str:
[end of new definitions in xarray/core/indexing.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Very poor html repr performance on large multi-indexes
<!-- Please include a self-contained copy-pastable example that generates the issue if possible.
Please be concise with code posted. See guidelines below on how to provide a good bug report:
- Craft Minimal Bug Reports: http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports
- Minimal Complete Verifiable Examples: https://stackoverflow.com/help/mcve
Bug reports that follow these guidelines are easier to diagnose, and so are often handled much more quickly.
-->
**What happened**:
We have catestrophic performance on the html repr of some long multi-indexed data arrays. Here's a case of it taking 12s.
**Minimal Complete Verifiable Example**:
```python
import xarray as xr
ds = xr.tutorial.load_dataset("air_temperature")
da = ds["air"].stack(z=[...])
da.shape
# (3869000,)
%timeit -n 1 -r 1 da._repr_html_()
# 12.4 s !!
```
**Anything else we need to know?**:
I thought we'd fixed some issues here: https://github.com/pydata/xarray/pull/4846/files
**Environment**:
<details><summary>Output of <tt>xr.show_versions()</tt></summary>
INSTALLED VERSIONS
------------------
commit: None
python: 3.8.10 (default, May 9 2021, 13:21:55)
[Clang 12.0.5 (clang-1205.0.22.9)]
python-bits: 64
OS: Darwin
OS-release: 20.4.0
machine: x86_64
processor: i386
byteorder: little
LC_ALL: None
LANG: None
LOCALE: ('en_US', 'UTF-8')
libhdf5: None
libnetcdf: None
xarray: 0.18.2
pandas: 1.2.4
numpy: 1.20.3
scipy: 1.6.3
netCDF4: None
pydap: None
h5netcdf: None
h5py: None
Nio: None
zarr: 2.8.3
cftime: 1.4.1
nc_time_axis: None
PseudoNetCDF: None
rasterio: 1.2.3
cfgrib: None
iris: None
bottleneck: 1.3.2
dask: 2021.06.1
distributed: 2021.06.1
matplotlib: 3.4.2
cartopy: None
seaborn: 0.11.1
numbagg: 0.2.1
pint: None
setuptools: 56.0.0
pip: 21.1.2
conda: None
pytest: 6.2.4
IPython: 7.24.0
sphinx: 4.0.1
</details>
----------
I think it's some lazy calculation that kicks in. Because I can reproduce using np.asarray.
```python
import numpy as np
import xarray as xr
ds = xr.tutorial.load_dataset("air_temperature")
da = ds["air"].stack(z=[...])
coord = da.z.variable.to_index_variable()
# This is very slow:
a = np.asarray(coord)
da._repr_html_()
```

Yes, I think it's materializing the multiindex as an array of tuples. Which we definitely shouldn't be doing for reprs.
@Illviljan nice profiling view! What is that?
One way of solving it could be to slice the arrays to a smaller size but still showing the same repr. Because `coords[0:12]` seems easy to print, not sure how tricky it is to slice it in this way though.
I'm using https://github.com/spyder-ide/spyder for the profiling and general hacking.
Yes very much so @Illviljan . But weirdly the linked PR is attempting to do that — so maybe this code path doesn't hit that change?
Spyder's profiler looks good!
> But weirdly the linked PR is attempting to do that — so maybe this code path doesn't hit that change?
I think the linked PR only fixed the summary (inline) repr. The bottleneck here is when formatting the array detailed view for the multi-index coordinates, which triggers the conversion of the whole pandas MultiIndex (tuple elements) and each of its levels as a numpy arrays.
--------------------
</issues> | 728b648d5c7c3e22fe3704ba163012840408bf66 |
pypa__hatch-164 | 164 | pypa/hatch | null | 66ff7195edb872fc36dadf61775640aa41820157 | 2022-03-19T19:37:04Z | diff --git a/docs/environment.md b/docs/environment.md
index 313d9328f..f72a58f32 100644
--- a/docs/environment.md
+++ b/docs/environment.md
@@ -174,18 +174,26 @@ Every environment can define its own set of [matrices](config/environment.md#mat
Using the [`env show`](cli/reference.md#hatch-env-show) command would then display:
```console
-$ hatch env show
-default
-
-[test]
-py27-42
-py27-3.14
-py38-42
-py38-3.14
-py38-9000-foo
-py38-9000-bar
-py39-9000-foo
-py39-9000-bar
+$ hatch env show --ascii
+ Standalone
++---------+---------+
+| Name | Type |
++=========+=========+
+| default | virtual |
++---------+---------+
+ Matrices
++------+---------+--------------------+--------------+
+| Name | Type | Envs | Dependencies |
++======+=========+====================+==============+
+| test | virtual | test.py27-42 | pytest |
+| | | test.py27-3.14 | |
+| | | test.py38-42 | |
+| | | test.py38-3.14 | |
+| | | test.py38-9000-foo | |
+| | | test.py38-9000-bar | |
+| | | test.py39-9000-foo | |
+| | | test.py39-9000-bar | |
++------+---------+--------------------+--------------+
```
## Removal
diff --git a/docs/meta/history.md b/docs/meta/history.md
index ca7528c34..d20073150 100644
--- a/docs/meta/history.md
+++ b/docs/meta/history.md
@@ -10,6 +10,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Unreleased
+***Added:***
+
+- Add ability to select specific environments for command execution of matrices based on variables
+
### [1.0.0rc15](https://github.com/ofek/hatch/releases/tag/hatch-v1.0.0rc15) - 2022-03-18 ### {: #hatch-v1.0.0rc15 }
***Added:***
diff --git a/src/hatch/cli/run/__init__.py b/src/hatch/cli/run/__init__.py
index d55912623..8be377a6e 100644
--- a/src/hatch/cli/run/__init__.py
+++ b/src/hatch/cli/run/__init__.py
@@ -1,16 +1,107 @@
import click
+def parse_variable_filter(argument):
+ variable, _, values = argument[1:].partition('=')
+ if variable == 'py':
+ variable = 'python'
+
+ parsed_values = set(values.split(',')) if values else set()
+ return variable, parsed_values
+
+
+def select_matrix_environments(environments, included_variables, excluded_variables):
+ selected_environments = []
+ for env_name, variables in environments.items():
+ for variable, value in variables.items():
+ if variable in excluded_variables:
+ excluded_values = excluded_variables[variable]
+ if not excluded_values or value in excluded_values:
+ break
+
+ if included_variables:
+ if variable not in included_variables:
+ break
+ else:
+ included_values = included_variables[variable]
+ if included_values and value not in included_values:
+ break
+ else:
+ selected_environments.append(env_name)
+
+ return selected_environments
+
+
@click.command(
- short_help="Run a command within a project's environment",
+ short_help='Run commands within project environments',
context_settings={'help_option_names': [], 'ignore_unknown_options': True},
)
@click.argument('args', metavar='[ENV:]ARGS...', required=True, nargs=-1)
@click.pass_obj
def run(app, args):
- """Run commands within a project's environment."""
+ """
+ Run commands within project environments.
+
+ If the first argument contains a colon, then the preceding component will be
+ interpreted as the name of the environment to target, overriding the `-e`/`--env`
+ [root option](#hatch) and the `HATCH_ENV` environment variable.
+
+ If the environment provides matrices, then you may also provide leading arguments
+ starting with a `+` or `-` to select or exclude certain variables, optionally
+ followed by specific comma-separated values. For example, if you have the
+ following configuration:
+
+ === ":octicons-file-code-16: pyproject.toml"
+
+ ```toml
+ [[tool.hatch.envs.test.matrix]]
+ python = ["39", "310"]
+ version = ["42", "3.14", "9000"]
+ ```
+
+ === ":octicons-file-code-16: hatch.toml"
+
+ ```toml
+ [[envs.test.matrix]]
+ python = ["39", "310"]
+ version = ["42", "3.14", "9000"]
+ ```
+
+ then running:
+
+ ```
+ hatch run +py=310 -version=9000 test:pytest
+ ```
+
+ would execute `pytest` in the environments `test.py310-42` and `test.py310-3.14`.
+ Note that `py` may be used as an alias for `python`.
+ """
project = app.project
+ command_start = 0
+ included_variables = {}
+ excluded_variables = {}
+ for i, arg in enumerate(args):
+ command_start = i
+ if arg.startswith('+'):
+ variable, values = parse_variable_filter(arg)
+ if variable in included_variables:
+ app.abort(f'Duplicate included variable: {variable}')
+ included_variables[variable] = values
+ elif arg.startswith('-'):
+ variable, values = parse_variable_filter(arg)
+ if variable in excluded_variables:
+ app.abort(f'Duplicate excluded variable: {variable}')
+ excluded_variables[variable] = values
+ else:
+ break
+ else:
+ command_start += 1
+
+ args = args[command_start:]
+ if not args:
+ app.abort('Missing argument `MATRIX:ARGS...`')
+
command, *args = args
env_name, separator, command = command.rpartition(':')
if not separator:
@@ -33,8 +124,17 @@ def run(app, args):
is_matrix = False
if env_name in project.config.matrices:
is_matrix = True
- environments = list(project.config.matrices[env_name]['envs'])
+ env_data = project.config.matrices[env_name]['envs']
+ if not env_data:
+ app.abort(f'No variables defined for matrix: {env_name}')
+
+ environments = select_matrix_environments(env_data, included_variables, excluded_variables)
+ if not environments:
+ app.abort('No environments were selected')
else:
+ if included_variables or excluded_variables:
+ app.abort(f'Variable selection is unsupported for non-matrix environment: {env_name}')
+
environments = [env_name]
any_compatible = False
diff --git a/src/hatch/project/config.py b/src/hatch/project/config.py
index 55bf71390..f6a042c64 100644
--- a/src/hatch/project/config.py
+++ b/src/hatch/project/config.py
@@ -255,13 +255,15 @@ def envs(self):
new_env_name = f'{env_name}.{new_env_name}'
# Save the generated environment
- all_envs[new_env_name] = variable_values
final_config[new_env_name] = new_config
cached_overrides[new_env_name] = {
'platform': current_cached_overrides['platform'],
'env': current_cached_overrides['env'],
'matrix': cached_matrix_overrides,
}
+ all_envs[new_env_name] = variable_values
+ if 'py' in variable_values:
+ all_envs[new_env_name] = {'python': variable_values.pop('py'), **variable_values}
# Remove the root matrix generator
del cached_overrides[env_name]
| diff --git a/tests/cli/run/test_run.py b/tests/cli/run/test_run.py
index e40474fab..ab68c3cec 100644
--- a/tests/cli/run/test_run.py
+++ b/tests/cli/run/test_run.py
@@ -391,6 +391,38 @@ def test_error(hatch, helpers, temp_dir, config_file):
assert not output_file.is_file()
+def test_matrix_no_environments(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': []})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch(
+ 'run', 'test:python', '-c', "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])"
+ )
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ No variables defined for matrix: test
+ """
+ )
+
+
def test_matrix(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
@@ -806,3 +838,355 @@ def test_env_detection_override(hatch, helpers, temp_dir, config_file):
python_path = str(output_file.read_text()).strip()
assert str(env_dirs[1]) in python_path
+
+
+def test_matrix_variable_selection_no_command(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch('run', '+version=9000')
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ Missing argument `MATRIX:ARGS...`
+ """
+ )
+
+
+def test_matrix_variable_selection_duplicate_inclusion(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch('run', '+version=9000', '+version=42')
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ Duplicate included variable: version
+ """
+ )
+
+
+def test_matrix_variable_selection_duplicate_exclusion(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch('run', '-version=9000', '-version=42')
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ Duplicate excluded variable: version
+ """
+ )
+
+
+def test_matrix_variable_selection_python_alias(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'python': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch('run', '+py=9000', '+python=42')
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ Duplicate included variable: python
+ """
+ )
+
+
+def test_matrix_variable_selection_not_matrix(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch(
+ 'run',
+ '+version=9000',
+ 'python',
+ '-c',
+ "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])",
+ )
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ Variable selection is unsupported for non-matrix environment: default
+ """
+ )
+
+
+def test_matrix_variable_selection_inclusion(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch(
+ 'run',
+ '+version=9000',
+ 'test:python',
+ '-c',
+ "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])",
+ )
+
+ assert result.exit_code == 0, result.output
+ assert result.output == helpers.dedent(
+ """
+ ────────────────────────────────── test.9000 ───────────────────────────────────
+ Creating environment: test.9000
+ """
+ )
+ output_file = project_path / 'test.txt'
+ assert output_file.is_file()
+
+ env_data_path = data_path / 'env' / 'virtual'
+ assert env_data_path.is_dir()
+
+ storage_dirs = list(env_data_path.iterdir())
+ assert len(storage_dirs) == 1
+
+ storage_path = storage_dirs[0]
+
+ project_part = f'{project_path.name}-'
+ assert storage_path.name.startswith(project_part)
+
+ hash_part = storage_path.name[len(project_part) :]
+ assert len(hash_part) == 8
+
+ env_dirs = list(storage_path.iterdir())
+ assert len(env_dirs) == 1
+
+ env_path = env_dirs[0]
+ assert env_path.name == 'test.9000'
+
+ python_path = str(output_file.read_text()).strip()
+ assert str(env_path) in python_path
+
+
+def test_matrix_variable_selection_exclusion(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch(
+ 'run',
+ '-version=9000',
+ 'test:python',
+ '-c',
+ "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])",
+ )
+
+ assert result.exit_code == 0, result.output
+ assert result.output == helpers.dedent(
+ """
+ ─────────────────────────────────── test.42 ────────────────────────────────────
+ Creating environment: test.42
+ """
+ )
+ output_file = project_path / 'test.txt'
+ assert output_file.is_file()
+
+ env_data_path = data_path / 'env' / 'virtual'
+ assert env_data_path.is_dir()
+
+ storage_dirs = list(env_data_path.iterdir())
+ assert len(storage_dirs) == 1
+
+ storage_path = storage_dirs[0]
+
+ project_part = f'{project_path.name}-'
+ assert storage_path.name.startswith(project_part)
+
+ hash_part = storage_path.name[len(project_part) :]
+ assert len(hash_part) == 8
+
+ env_dirs = list(storage_path.iterdir())
+ assert len(env_dirs) == 1
+
+ env_path = env_dirs[0]
+ assert env_path.name == 'test.42'
+
+ python_path = str(output_file.read_text()).strip()
+ assert str(env_path) in python_path
+
+
+def test_matrix_variable_selection_exclude_all(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch(
+ 'run',
+ '-version',
+ 'test:python',
+ '-c',
+ "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])",
+ )
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ No environments were selected
+ """
+ )
+
+
+def test_matrix_variable_selection_include_none(hatch, helpers, temp_dir, config_file):
+ config_file.model.template.plugins['default']['tests'] = False
+ config_file.save()
+
+ project_name = 'My App'
+
+ with temp_dir.as_cwd():
+ result = hatch('new', project_name)
+
+ assert result.exit_code == 0, result.output
+
+ project_path = temp_dir / 'my-app'
+ data_path = temp_dir / 'data'
+ data_path.mkdir()
+
+ project = Project(project_path)
+ helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
+ helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
+
+ with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
+ result = hatch(
+ 'run',
+ '+version=3.14',
+ 'test:python',
+ '-c',
+ "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])",
+ )
+
+ assert result.exit_code == 1, result.output
+ assert result.output == helpers.dedent(
+ """
+ No environments were selected
+ """
+ )
diff --git a/tests/project/test_config.py b/tests/project/test_config.py
index c97e5a5c1..07f895b80 100644
--- a/tests/project/test_config.py
+++ b/tests/project/test_config.py
@@ -48,6 +48,8 @@ def construct_matrix_data(env_name, config, overrides=None):
new_env_name = f'{env_name}.{new_env_name}'
envs[new_env_name] = variable_values
+ if 'py' in variable_values:
+ envs[new_env_name] = {'python': variable_values.pop('py'), **variable_values}
config.update(overrides or {})
config.setdefault('type', 'virtual')
| diff --git a/docs/environment.md b/docs/environment.md
index 313d9328f..f72a58f32 100644
--- a/docs/environment.md
+++ b/docs/environment.md
@@ -174,18 +174,26 @@ Every environment can define its own set of [matrices](config/environment.md#mat
Using the [`env show`](cli/reference.md#hatch-env-show) command would then display:
```console
-$ hatch env show
-default
-
-[test]
-py27-42
-py27-3.14
-py38-42
-py38-3.14
-py38-9000-foo
-py38-9000-bar
-py39-9000-foo
-py39-9000-bar
+$ hatch env show --ascii
+ Standalone
++---------+---------+
+| Name | Type |
++=========+=========+
+| default | virtual |
++---------+---------+
+ Matrices
++------+---------+--------------------+--------------+
+| Name | Type | Envs | Dependencies |
++======+=========+====================+==============+
+| test | virtual | test.py27-42 | pytest |
+| | | test.py27-3.14 | |
+| | | test.py38-42 | |
+| | | test.py38-3.14 | |
+| | | test.py38-9000-foo | |
+| | | test.py38-9000-bar | |
+| | | test.py39-9000-foo | |
+| | | test.py39-9000-bar | |
++------+---------+--------------------+--------------+
```
## Removal
diff --git a/docs/meta/history.md b/docs/meta/history.md
index ca7528c34..d20073150 100644
--- a/docs/meta/history.md
+++ b/docs/meta/history.md
@@ -10,6 +10,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Unreleased
+***Added:***
+
+- Add ability to select specific environments for command execution of matrices based on variables
+
### [1.0.0rc15](https://github.com/ofek/hatch/releases/tag/hatch-v1.0.0rc15) - 2022-03-18 ### {: #hatch-v1.0.0rc15 }
***Added:***
| [
{
"components": [
{
"doc": "",
"lines": [
4,
10
],
"name": "parse_variable_filter",
"signature": "def parse_variable_filter(argument):",
"type": "function"
},
{
"doc": "",
"lines": [
13,
32
... | [
"tests/cli/run/test_run.py::test_matrix_no_environments",
"tests/cli/run/test_run.py::test_matrix_variable_selection_no_command",
"tests/cli/run/test_run.py::test_matrix_variable_selection_duplicate_inclusion",
"tests/cli/run/test_run.py::test_matrix_variable_selection_duplicate_exclusion",
"tests/cli/run/t... | [
"tests/cli/run/test_run.py::test_automatic_creation",
"tests/cli/run/test_run.py::test_enter_project_directory",
"tests/cli/run/test_run.py::test_sync_dependencies",
"tests/cli/run/test_run.py::test_scripts",
"tests/cli/run/test_run.py::test_scripts_specific_environment",
"tests/cli/run/test_run.py::test_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add ability to select specific environments of matrices
{}
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/hatch/cli/run/__init__.py]
(definition of parse_variable_filter:)
def parse_variable_filter(argument):
(definition of select_matrix_environments:)
def select_matrix_environments(environments, included_variables, excluded_variables):
[end of new definitions in src/hatch/cli/run/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | c06c820d722667306f39bda956c58cf4c48d0728 | |
scikit-learn__scikit-learn-22866 | 22,866 | scikit-learn/scikit-learn | 1.1 | 38ff5be25d0164bf9598bcfdde3b791ad6e261b0 | 2022-03-16T16:29:31Z | diff --git a/doc/modules/classes.rst b/doc/modules/classes.rst
index b7000bcf7cbb2..9fe76e2699f05 100644
--- a/doc/modules/classes.rst
+++ b/doc/modules/classes.rst
@@ -933,6 +933,7 @@ details.
metrics.check_scoring
metrics.get_scorer
+ metrics.get_scorer_names
metrics.make_scorer
Classification metrics
diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index a1fce2d3454dc..3d6a9327229e9 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -115,14 +115,15 @@ Usage examples:
>>> model = svm.SVC()
>>> cross_val_score(model, X, y, cv=5, scoring='wrong_choice')
Traceback (most recent call last):
- ValueError: 'wrong_choice' is not a valid scoring value. Use sorted(sklearn.metrics.SCORERS.keys()) to get valid options.
+ ValueError: 'wrong_choice' is not a valid scoring value. Use
+ sklearn.metrics.get_scorer_names() to get valid options.
.. note::
- The values listed by the ``ValueError`` exception correspond to the functions measuring
- prediction accuracy described in the following sections.
- The scorer objects for those functions are stored in the dictionary
- ``sklearn.metrics.SCORERS``.
+ The values listed by the ``ValueError`` exception correspond to the
+ functions measuring prediction accuracy described in the following
+ sections. You can retrieve the names of all available scorers by calling
+ :func:`~sklearn.metrics.get_scorer_names`.
.. currentmodule:: sklearn.metrics
@@ -563,8 +564,8 @@ or *informedness*.
Machine Learning for Predictive Data Analytics: Algorithms, Worked Examples,
and Case Studies <https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_,
2015.
- .. [Urbanowicz2015] Urbanowicz R.J., Moore, J.H. :doi:`ExSTraCS 2.0: description
- and evaluation of a scalable learning classifier
+ .. [Urbanowicz2015] Urbanowicz R.J., Moore, J.H. :doi:`ExSTraCS 2.0: description
+ and evaluation of a scalable learning classifier
system <10.1007/s12065-015-0128-8>`, Evol. Intel. (2015) 8: 89.
.. _cohen_kappa:
diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index b9ab45e1d344a..62d871eeaac5d 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -633,6 +633,10 @@ Changelog
- |Enhancement| :func:`metrics.top_k_accuracy_score` raises an improved error
message when `y_true` is binary and `y_score` is 2d. :pr:`22284` by `Thomas Fan`_.
+- |API| `metrics.SCORERS` is now deprecated and will be removed in 1.3. Please
+ use :func:`~metrics.get_scorer_names` to retrieve the names of all available
+ scorers. :pr:`22866` by `Adrin Jalali`_.
+
- |API| :class:`metrics.DistanceMetric` has been moved from
:mod:`sklearn.neighbors` to :mod:`sklearn.metric`.
Using `neighbors.DistanceMetric` for imports is still valid for
diff --git a/sklearn/metrics/__init__.py b/sklearn/metrics/__init__.py
index e4339229c5b64..75a80bf892acb 100644
--- a/sklearn/metrics/__init__.py
+++ b/sklearn/metrics/__init__.py
@@ -83,6 +83,8 @@
from ._scorer import make_scorer
from ._scorer import SCORERS
from ._scorer import get_scorer
+from ._scorer import get_scorer_names
+
from ._plot.det_curve import plot_det_curve
from ._plot.det_curve import DetCurveDisplay
@@ -170,6 +172,7 @@
"roc_auc_score",
"roc_curve",
"SCORERS",
+ "get_scorer_names",
"silhouette_samples",
"silhouette_score",
"top_k_accuracy_score",
diff --git a/sklearn/metrics/_scorer.py b/sklearn/metrics/_scorer.py
index 1e8e330d1af81..e1655af169fcc 100644
--- a/sklearn/metrics/_scorer.py
+++ b/sklearn/metrics/_scorer.py
@@ -23,6 +23,8 @@
from collections import Counter
import numpy as np
+import copy
+import warnings
from . import (
r2_score,
@@ -389,6 +391,8 @@ def get_scorer(scoring):
"""Get a scorer from string.
Read more in the :ref:`User Guide <scoring_parameter>`.
+ :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names
+ of all available scorers.
Parameters
----------
@@ -399,14 +403,20 @@ def get_scorer(scoring):
-------
scorer : callable
The scorer.
+
+ Notes
+ -----
+ When passed a string, this function always returns a copy of the scorer
+ object. Calling `get_scorer` twice for the same scorer results in two
+ separate scorer objects.
"""
if isinstance(scoring, str):
try:
- scorer = SCORERS[scoring]
+ scorer = copy.deepcopy(_SCORERS[scoring])
except KeyError:
raise ValueError(
"%r is not a valid scoring value. "
- "Use sorted(sklearn.metrics.SCORERS.keys()) "
+ "Use sklearn.metrics.get_scorer_names() "
"to get valid options." % scoring
)
else:
@@ -747,7 +757,21 @@ def make_scorer(
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
-SCORERS = dict(
+# TODO(1.3) Remove
+class _DeprecatedScorers(dict):
+ """A temporary class to deprecate SCORERS."""
+
+ def __getitem__(self, item):
+ warnings.warn(
+ "sklearn.metrics.SCORERS is deprecated and will be removed in v1.3. "
+ "Please use sklearn.metrics.get_scorer_names to get a list of available "
+ "scorers and sklearn.metrics.get_metric to get scorer.",
+ FutureWarning,
+ )
+ return super().__getitem__(item)
+
+
+_SCORERS = dict(
explained_variance=explained_variance_scorer,
r2=r2_scorer,
max_error=max_error_scorer,
@@ -784,13 +808,29 @@ def make_scorer(
)
+def get_scorer_names():
+ """Get the names of all available scorers.
+
+ These names can be passed to :func:`~sklearn.metrics.get_scorer` to
+ retrieve the scorer object.
+
+ Returns
+ -------
+ list of str
+ Names of all available scorers.
+ """
+ return sorted(_SCORERS.keys())
+
+
for name, metric in [
("precision", precision_score),
("recall", recall_score),
("f1", f1_score),
("jaccard", jaccard_score),
]:
- SCORERS[name] = make_scorer(metric, average="binary")
+ _SCORERS[name] = make_scorer(metric, average="binary")
for average in ["macro", "micro", "samples", "weighted"]:
qualified_name = "{0}_{1}".format(name, average)
- SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)
+ _SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)
+
+SCORERS = _DeprecatedScorers(_SCORERS)
| diff --git a/sklearn/metrics/tests/test_score_objects.py b/sklearn/metrics/tests/test_score_objects.py
index 3b3caceb9970a..23680e48ae3e7 100644
--- a/sklearn/metrics/tests/test_score_objects.py
+++ b/sklearn/metrics/tests/test_score_objects.py
@@ -41,7 +41,7 @@
_MultimetricScorer,
_check_multimetric_scoring,
)
-from sklearn.metrics import make_scorer, get_scorer, SCORERS
+from sklearn.metrics import make_scorer, get_scorer, SCORERS, get_scorer_names
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
@@ -220,8 +220,8 @@ def __call__(self, est, X, y):
def test_all_scorers_repr():
# Test that all scorers have a working repr
- for name, scorer in SCORERS.items():
- repr(scorer)
+ for name in get_scorer_names():
+ repr(get_scorer(name))
def check_scoring_validator_for_single_metric_usecases(scoring_validator):
@@ -406,7 +406,7 @@ def test_classification_binary_scores(scorer_name, metric):
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
- score = SCORERS[scorer_name](clf, X_test, y_test)
+ score = get_scorer(scorer_name)(clf, X_test, y_test)
expected_score = metric(y_test, clf.predict(X_test))
assert_almost_equal(score, expected_score)
@@ -444,7 +444,7 @@ def test_classification_multiclass_scores(scorer_name, metric):
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X_train, y_train)
- score = SCORERS[scorer_name](clf, X_test, y_test)
+ score = get_scorer(scorer_name)(clf, X_test, y_test)
expected_score = metric(y_test, clf.predict(X_test))
assert score == pytest.approx(expected_score)
@@ -617,7 +617,8 @@ def test_classification_scorer_sample_weight():
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
- for name, scorer in SCORERS.items():
+ for name in get_scorer_names():
+ scorer = get_scorer(name)
if name in REGRESSION_SCORERS:
# skip the regression scores
continue
@@ -672,7 +673,8 @@ def test_regression_scorer_sample_weight():
reg = DecisionTreeRegressor(random_state=0)
reg.fit(X_train, y_train)
- for name, scorer in SCORERS.items():
+ for name in get_scorer_names():
+ scorer = get_scorer(name)
if name not in REGRESSION_SCORERS:
# skip classification scorers
continue
@@ -701,7 +703,7 @@ def test_regression_scorer_sample_weight():
)
-@pytest.mark.parametrize("name", SCORERS)
+@pytest.mark.parametrize("name", get_scorer_names())
def test_scorer_memmap_input(name):
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
@@ -715,7 +717,7 @@ def test_scorer_memmap_input(name):
# UndefinedMetricWarning for P / R scores
with ignore_warnings():
- scorer, estimator = SCORERS[name], ESTIMATORS[name]
+ scorer, estimator = get_scorer(name), ESTIMATORS[name]
if name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm_1)
else:
@@ -1120,6 +1122,17 @@ def test_scorer_select_proba_error(scorer):
scorer(lr, X, y)
+def test_get_scorer_return_copy():
+ # test that get_scorer returns a copy
+ assert get_scorer("roc_auc") is not get_scorer("roc_auc")
+
+
+# TODO(1.3) Remove
+def test_SCORERS_deprecated():
+ with pytest.warns(FutureWarning, match="is deprecated and will be removed in v1.3"):
+ SCORERS["roc_auc"]
+
+
def test_scorer_no_op_multiclass_select_proba():
# check that calling a ProbaScorer on a multiclass problem do not raise
# even if `y_true` would be binary during the scoring.
| diff --git a/doc/modules/classes.rst b/doc/modules/classes.rst
index b7000bcf7cbb2..9fe76e2699f05 100644
--- a/doc/modules/classes.rst
+++ b/doc/modules/classes.rst
@@ -933,6 +933,7 @@ details.
metrics.check_scoring
metrics.get_scorer
+ metrics.get_scorer_names
metrics.make_scorer
Classification metrics
diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index a1fce2d3454dc..3d6a9327229e9 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -115,14 +115,15 @@ Usage examples:
>>> model = svm.SVC()
>>> cross_val_score(model, X, y, cv=5, scoring='wrong_choice')
Traceback (most recent call last):
- ValueError: 'wrong_choice' is not a valid scoring value. Use sorted(sklearn.metrics.SCORERS.keys()) to get valid options.
+ ValueError: 'wrong_choice' is not a valid scoring value. Use
+ sklearn.metrics.get_scorer_names() to get valid options.
.. note::
- The values listed by the ``ValueError`` exception correspond to the functions measuring
- prediction accuracy described in the following sections.
- The scorer objects for those functions are stored in the dictionary
- ``sklearn.metrics.SCORERS``.
+ The values listed by the ``ValueError`` exception correspond to the
+ functions measuring prediction accuracy described in the following
+ sections. You can retrieve the names of all available scorers by calling
+ :func:`~sklearn.metrics.get_scorer_names`.
.. currentmodule:: sklearn.metrics
@@ -563,8 +564,8 @@ or *informedness*.
Machine Learning for Predictive Data Analytics: Algorithms, Worked Examples,
and Case Studies <https://mitpress.mit.edu/books/fundamentals-machine-learning-predictive-data-analytics>`_,
2015.
- .. [Urbanowicz2015] Urbanowicz R.J., Moore, J.H. :doi:`ExSTraCS 2.0: description
- and evaluation of a scalable learning classifier
+ .. [Urbanowicz2015] Urbanowicz R.J., Moore, J.H. :doi:`ExSTraCS 2.0: description
+ and evaluation of a scalable learning classifier
system <10.1007/s12065-015-0128-8>`, Evol. Intel. (2015) 8: 89.
.. _cohen_kappa:
diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index b9ab45e1d344a..62d871eeaac5d 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -633,6 +633,10 @@ Changelog
- |Enhancement| :func:`metrics.top_k_accuracy_score` raises an improved error
message when `y_true` is binary and `y_score` is 2d. :pr:`22284` by `Thomas Fan`_.
+- |API| `metrics.SCORERS` is now deprecated and will be removed in 1.3. Please
+ use :func:`~metrics.get_scorer_names` to retrieve the names of all available
+ scorers. :pr:`22866` by `Adrin Jalali`_.
+
- |API| :class:`metrics.DistanceMetric` has been moved from
:mod:`sklearn.neighbors` to :mod:`sklearn.metric`.
Using `neighbors.DistanceMetric` for imports is still valid for
| [
{
"components": [
{
"doc": "A temporary class to deprecate SCORERS.",
"lines": [
761,
771
],
"name": "_DeprecatedScorers",
"signature": "class _DeprecatedScorers(dict):",
"type": "class"
},
{
"doc": "",
"lines"... | [
"sklearn/metrics/tests/test_score_objects.py::test_all_scorers_repr",
"sklearn/metrics/tests/test_score_objects.py::test_check_scoring_and_check_multimetric_scoring[single_tuple]",
"sklearn/metrics/tests/test_score_objects.py::test_check_scoring_and_check_multimetric_scoring[single_list]",
"sklearn/metrics/te... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
API get_scorer returns a copy and introduce get_scorer_names
__EDIT__: This PR now introduces a new `get_scorer_names`, makes `get_metric` to return a copy, and deprecated `SCORERS`.
__OLD__:
This PR makes `SCORERS[scorer_name]` to return a copy rather than the original object. This also means `get_scorer` returns a copy each time.
cc @thomasjpfan @jnothman @glemaitre @lorentzenchr
Note that this is to be merged into `main`.
Closes #17942
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/metrics/_scorer.py]
(definition of _DeprecatedScorers:)
class _DeprecatedScorers(dict):
"""A temporary class to deprecate SCORERS."""
(definition of _DeprecatedScorers.__getitem__:)
def __getitem__(self, item):
(definition of get_scorer_names:)
def get_scorer_names():
"""Get the names of all available scorers.
These names can be passed to :func:`~sklearn.metrics.get_scorer` to
retrieve the scorer object.
Returns
-------
list of str
Names of all available scorers."""
[end of new definitions in sklearn/metrics/_scorer.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Should get_scorer return a deep copy of the scorer object
I stumble in something that I am wondering if this is an issue:
```python
scorer = get_scorer("roc_auc")
# somehow set the pos_label
scorer._kwargs["pos_label"] = "xxx"
scorer_2 = get_scorer("roc_auc")
# I would expect it to be a deep copy and not the same as scorer
assert scorer is not scorer_2
```
----------
Working on sample props I also encountered this issue.
I have been thinking of `get_scorer` as returning a singleton and should not be modified. In general, I think returning a copy would be better because the following is undesirable:
```py
from sklearn.metrics import get_scorer
scorer = get_scorer("roc_auc")
# somehow set the pos_label
scorer._kwargs["pos_label"] = "xxx"
scorer_2 = get_scorer("roc_auc")
scorer_2._kwargs
# {'pos_label': 'xxx'}
```
And in one of my fix I am starting to make such internal changes. This is actually a surprising behaviour when it fails :)
Are there arguments in favor of **not** returning a (deep) copy?
Something I just realized regarding how in `GridSearchCV` we modify the scorer, is that the user can't easily then reproduce the scores reported by `GridSearchCV`. Should we then just raise when there's a need to change the scorer? Hmm, not sure.
--------------------
</issues> | 38ff5be25d0164bf9598bcfdde3b791ad6e261b0 |
conan-io__conan-10800 | 10,800 | conan-io/conan | null | 6ebea89bd1ea05cce7b0213f2cd067f9f666effa | 2022-03-16T09:16:38Z | diff --git a/conan/tools/cmake/toolchain/blocks.py b/conan/tools/cmake/toolchain/blocks.py
index fc383bcddab..0680d7af4d2 100644
--- a/conan/tools/cmake/toolchain/blocks.py
+++ b/conan/tools/cmake/toolchain/blocks.py
@@ -547,6 +547,38 @@ def context(self):
return {"paths": [ut.replace("\\", "/") for ut in user_toolchain]}
+class ExtraFlagsBlock(Block):
+ """This block is adding flags directly from user [conf] section"""
+
+ template = textwrap.dedent("""
+ {% if cxxflags %}
+ string(APPEND CONAN_CXX_FLAGS "{% for cxxflag in cxxflags %} {{ cxxflag }}{% endfor %}")
+ {% endif %}
+ {% if cflags %}
+ string(APPEND CONAN_C_FLAGS "{% for cflag in cflags %} {{ cflag }}{% endfor %}")
+ {% endif %}
+ {% if sharedlinkflags %}
+ string(APPEND CONAN_SHARED_LINKER_FLAGS "{% for sharedlinkflag in sharedlinkflags %} {{ sharedlinkflag }}{% endfor %}")
+ {% endif %}
+ {% if exelinkflags %}
+ string(APPEND CONAN_EXE_LINKER_FLAGS "{% for exelinkflag in exelinkflags %} {{ exelinkflag }}{% endfor %}")
+ {% endif %}
+ """)
+
+ def context(self):
+ # Now, it's time to get all the flags defined by the user
+ cxxflags = self._conanfile.conf.get("tools.build:cxxflags", default=[], check_type=list)
+ cflags = self._conanfile.conf.get("tools.build:cflags", default=[], check_type=list)
+ sharedlinkflags = self._conanfile.conf.get("tools.build:sharedlinkflags", default=[], check_type=list)
+ exelinkflags = self._conanfile.conf.get("tools.build:exelinkflags", default=[], check_type=list)
+ return {
+ "cxxflags": cxxflags,
+ "cflags": cflags,
+ "sharedlinkflags": sharedlinkflags,
+ "exelinkflags": exelinkflags
+ }
+
+
class CMakeFlagsInitBlock(Block):
template = textwrap.dedent("""
if(DEFINED CONAN_CXX_FLAGS)
diff --git a/conan/tools/cmake/toolchain/toolchain.py b/conan/tools/cmake/toolchain/toolchain.py
index 08c03023132..157783699a9 100644
--- a/conan/tools/cmake/toolchain/toolchain.py
+++ b/conan/tools/cmake/toolchain/toolchain.py
@@ -10,7 +10,7 @@
from conan.tools.cmake.toolchain.blocks import ToolchainBlocks, UserToolchain, GenericSystemBlock, \
AndroidSystemBlock, AppleSystemBlock, FPicBlock, ArchitectureBlock, GLibCXXBlock, VSRuntimeBlock, \
CppStdBlock, ParallelBlock, CMakeFlagsInitBlock, TryCompileBlock, FindFiles, SkipRPath, \
- SharedLibBock, OutputDirsBlock
+ SharedLibBock, OutputDirsBlock, ExtraFlagsBlock
from conan.tools.files.files import save_toolchain_args
from conan.tools.intel import IntelCC
from conan.tools.microsoft import VCVars
@@ -126,6 +126,7 @@ def __init__(self, conanfile, generator=None, namespace=None):
("vs_runtime", VSRuntimeBlock),
("cppstd", CppStdBlock),
("parallel", ParallelBlock),
+ ("extra_flags", ExtraFlagsBlock),
("cmake_flags_init", CMakeFlagsInitBlock),
("try_compile", TryCompileBlock),
("find_paths", FindFiles),
@@ -133,7 +134,6 @@ def __init__(self, conanfile, generator=None, namespace=None):
("shared", SharedLibBock),
("output_dirs", OutputDirsBlock)])
-
check_using_build_profile(self._conanfile)
def _context(self):
diff --git a/conan/tools/gnu/autotoolstoolchain.py b/conan/tools/gnu/autotoolstoolchain.py
index 42b56dd9a2a..826898007ae 100644
--- a/conan/tools/gnu/autotoolstoolchain.py
+++ b/conan/tools/gnu/autotoolstoolchain.py
@@ -15,33 +15,34 @@ class AutotoolsToolchain:
def __init__(self, conanfile, namespace=None):
self._conanfile = conanfile
self._namespace = namespace
- build_type = self._conanfile.settings.get_safe("build_type")
self.configure_args = []
self.make_args = []
self.default_configure_install_args = True
- # TODO: compiler.runtime for Visual studio?
- # defines
- self.ndebug = None
- if build_type in ['Release', 'RelWithDebInfo', 'MinSizeRel']:
- self.ndebug = "NDEBUG"
- self.gcc_cxx11_abi = self._cxx11_abi_define()
- self.defines = []
-
- # cxxflags, cflags
+ # Flags
self.cxxflags = []
self.cflags = []
self.ldflags = []
- self.libcxx = self._libcxx()
- self.fpic = self._conanfile.options.get_safe("fPIC")
+ self.defines = []
+
+ # Defines
+ self.gcc_cxx11_abi = self._get_cxx11_abi_define()
+ self.ndebug = None
+ build_type = self._conanfile.settings.get_safe("build_type")
+ if build_type in ['Release', 'RelWithDebInfo', 'MinSizeRel']:
+ self.ndebug = "NDEBUG"
- self.cppstd = cppstd_flag(self._conanfile.settings)
- self.arch_flag = architecture_flag(self._conanfile.settings)
# TODO: This is also covering compilers like Visual Studio, necessary to test it (&remove?)
self.build_type_flags = build_type_flags(self._conanfile.settings)
self.build_type_link_flags = build_type_link_flags(self._conanfile.settings)
+ self.cppstd = cppstd_flag(self._conanfile.settings)
+ self.arch_flag = architecture_flag(self._conanfile.settings)
+ self.libcxx = self._get_libcxx_flag()
+ self.fpic = self._conanfile.options.get_safe("fPIC")
+ self.msvc_runtime_flag = self._get_msvc_runtime_flag()
+
# Cross build
self._host = None
self._build = None
@@ -50,8 +51,6 @@ def __init__(self, conanfile, namespace=None):
self.apple_arch_flag = self.apple_isysroot_flag = None
self.apple_min_version_flag = apple_min_version_flag(self._conanfile)
- self.msvc_runtime_flag = self._get_msvc_runtime_flag()
-
if cross_building(self._conanfile):
os_build, arch_build, os_host, arch_host = get_cross_building_settings(self._conanfile)
compiler = self._conanfile.settings.get_safe("compiler")
@@ -69,6 +68,22 @@ def __init__(self, conanfile, namespace=None):
check_using_build_profile(self._conanfile)
+ def _get_cxx11_abi_define(self):
+ # https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html
+ # The default is libstdc++11, only specify the contrary '_GLIBCXX_USE_CXX11_ABI=0'
+ settings = self._conanfile.settings
+ libcxx = settings.get_safe("compiler.libcxx")
+ if not libcxx:
+ return
+
+ compiler = settings.get_safe("compiler.base") or settings.get_safe("compiler")
+ if compiler in ['clang', 'apple-clang', 'gcc']:
+ if libcxx == 'libstdc++':
+ return '_GLIBCXX_USE_CXX11_ABI=0'
+ elif libcxx == "libstdc++11" and self._conanfile.conf.get("tools.gnu:define_libcxx11_abi",
+ check_type=bool):
+ return '_GLIBCXX_USE_CXX11_ABI=1'
+
def _get_msvc_runtime_flag(self):
msvc_runtime_flag = None
if self._conanfile.settings.get_safe("compiler") == "msvc":
@@ -87,23 +102,7 @@ def _get_msvc_runtime_flag(self):
return msvc_runtime_flag
- def _cxx11_abi_define(self):
- # https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html
- # The default is libstdc++11, only specify the contrary '_GLIBCXX_USE_CXX11_ABI=0'
- settings = self._conanfile.settings
- libcxx = settings.get_safe("compiler.libcxx")
- if not libcxx:
- return
-
- compiler = settings.get_safe("compiler.base") or settings.get_safe("compiler")
- if compiler in ['clang', 'apple-clang', 'gcc']:
- if libcxx == 'libstdc++':
- return '_GLIBCXX_USE_CXX11_ABI=0'
- elif libcxx == "libstdc++11" and self._conanfile.conf.get("tools.gnu:define_libcxx11_abi",
- check_type=bool):
- return '_GLIBCXX_USE_CXX11_ABI=1'
-
- def _libcxx(self):
+ def _get_libcxx_flag(self):
settings = self._conanfile.settings
libcxx = settings.get_safe("compiler.libcxx")
if not libcxx:
@@ -124,57 +123,48 @@ def _libcxx(self):
elif compiler == "qcc":
return "-Y _%s" % str(libcxx)
+ @staticmethod
+ def _filter_list_empty_fields(v):
+ return list(filter(bool, v))
+
+ def _get_extra_flags(self):
+ # Now, it's time to get all the flags defined by the user
+ cxxflags = self._conanfile.conf.get("tools.build:cxxflags", default=[], check_type=list)
+ cflags = self._conanfile.conf.get("tools.build:cflags", default=[], check_type=list)
+ ldflags = self._conanfile.conf.get("tools.build:ldflags", default=[], check_type=list)
+ cppflags = self._conanfile.conf.get("tools.build:cppflags", default=[], check_type=list)
+ return {
+ "cxxflags": cxxflags,
+ "cflags": cflags,
+ "cppflags": cppflags,
+ "ldflags": ldflags
+ }
+
def environment(self):
env = Environment()
- # defines
- if self.ndebug:
- self.defines.append(self.ndebug)
- if self.gcc_cxx11_abi:
- self.defines.append(self.gcc_cxx11_abi)
-
- if self.libcxx:
- self.cxxflags.append(self.libcxx)
-
- if self.cppstd:
- self.cxxflags.append(self.cppstd)
-
- if self.arch_flag:
- self.cxxflags.append(self.arch_flag)
- self.cflags.append(self.arch_flag)
- self.ldflags.append(self.arch_flag)
- if self.build_type_flags:
- self.cxxflags.extend(self.build_type_flags)
- self.cflags.extend(self.build_type_flags)
+ apple_flags = [self.apple_isysroot_flag, self.apple_arch_flag, self.apple_min_version_flag]
+ fpic = "-fPIC" if self.fpic else None
+ extra_flags = self._get_extra_flags()
- if self.build_type_link_flags:
- self.ldflags.extend(self.build_type_link_flags)
-
- if self.fpic:
- self.cxxflags.append("-fPIC")
- self.cflags.append("-fPIC")
-
- if self.msvc_runtime_flag:
- self.cxxflags.append(self.msvc_runtime_flag)
- self.cflags.append(self.msvc_runtime_flag)
+ self.cxxflags.extend([self.libcxx, self.cppstd,
+ self.arch_flag, fpic, self.msvc_runtime_flag]
+ + self.build_type_flags + apple_flags + extra_flags["cxxflags"])
+ self.cflags.extend([self.arch_flag, fpic, self.msvc_runtime_flag]
+ + self.build_type_flags + apple_flags + extra_flags["cflags"])
+ self.ldflags.extend([self.arch_flag] + self.build_type_link_flags
+ + apple_flags + extra_flags["ldflags"])
+ self.defines.extend([self.ndebug, self.gcc_cxx11_abi] + extra_flags["cppflags"])
if is_msvc(self._conanfile):
env.define("CXX", "cl")
env.define("CC", "cl")
- # FIXME: Previously these flags where checked if already present at env 'CFLAGS', 'CXXFLAGS'
- # and 'self.cxxflags', 'self.cflags' before adding them
- for f in list(filter(bool, [self.apple_isysroot_flag,
- self.apple_arch_flag,
- self.apple_min_version_flag])):
- self.cxxflags.append(f)
- self.cflags.append(f)
- self.ldflags.append(f)
-
- env.append("CPPFLAGS", ["-D{}".format(d) for d in self.defines])
- env.append("CXXFLAGS", self.cxxflags)
- env.append("CFLAGS", self.cflags)
- env.append("LDFLAGS", self.ldflags)
+ env.append("CPPFLAGS", ["-D{}".format(d) for d in self._filter_list_empty_fields(self.defines)])
+ env.append("CXXFLAGS", self._filter_list_empty_fields(self.cxxflags))
+ env.append("CFLAGS", self._filter_list_empty_fields(self.cflags))
+ env.append("LDFLAGS", self._filter_list_empty_fields(self.ldflags))
+
return env
def vars(self):
diff --git a/conan/tools/meson/meson.py b/conan/tools/meson/meson.py
index 37287caa3e3..1285bab4769 100644
--- a/conan/tools/meson/meson.py
+++ b/conan/tools/meson/meson.py
@@ -3,6 +3,7 @@
from conan.tools.build import build_jobs
from conan.tools.meson import MesonToolchain
+
class Meson(object):
def __init__(self, conanfile):
self._conanfile = conanfile
diff --git a/conan/tools/meson/toolchain.py b/conan/tools/meson/toolchain.py
index 4f3880e3e4d..9540c0bc415 100644
--- a/conan/tools/meson/toolchain.py
+++ b/conan/tools/meson/toolchain.py
@@ -139,14 +139,19 @@ def __init__(self, conanfile, backend=None):
self.as_ = build_env.get("AS")
self.windres = build_env.get("WINDRES")
self.pkgconfig = build_env.get("PKG_CONFIG")
- self.c_args = build_env.get("CFLAGS", "")
- self.c_link_args = build_env.get("LDFLAGS", "")
- self.cpp_args = build_env.get("CXXFLAGS", "")
- self.cpp_link_args = build_env.get("LDFLAGS", "")
+ self.c_args = self._get_env_list(build_env.get("CFLAGS", []))
+ self.c_link_args = self._get_env_list(build_env.get("LDFLAGS", []))
+ self.cpp_args = self._get_env_list(build_env.get("CXXFLAGS", []))
+ self.cpp_link_args = self._get_env_list(build_env.get("LDFLAGS", []))
- self._add_apple_flags()
+ # Apple flags
+ self.apple_arch_flag = []
+ self.apple_isysroot_flag = []
+ self.apple_min_version_flag = []
- def _add_apple_flags(self):
+ self._resolve_apple_flags()
+
+ def _resolve_apple_flags(self):
conanfile = self._conanfile
os_ = conanfile.settings.get_safe("os")
if not is_apple_os(os_):
@@ -162,33 +167,41 @@ def _add_apple_flags(self):
if not os_sdk and os_ != "Macos":
raise ConanException("Please, specify a suitable value for os.sdk.")
- arch = to_apple_arch(conanfile.settings.get_safe("arch"))
# Calculating the main Apple flags
- deployment_target_flag = apple_min_version_flag(conanfile)
- sysroot_flag = "-isysroot " + sdk_path if sdk_path else ""
- arch_flag = "-arch " + arch if arch else ""
-
- apple_flags = {}
- if deployment_target_flag:
- flag_ = deployment_target_flag.split("=")[0]
- apple_flags[flag_] = deployment_target_flag
- if sysroot_flag:
- apple_flags["-isysroot"] = sysroot_flag
- if arch_flag:
- apple_flags["-arch"] = arch_flag
-
- for flag, arg_value in apple_flags.items():
- v = " " + arg_value
- if flag not in self.c_args:
- self.c_args += v
- if flag not in self.c_link_args:
- self.c_link_args += v
- if flag not in self.cpp_args:
- self.cpp_args += v
- if flag not in self.cpp_link_args:
- self.cpp_link_args += v
+ arch = to_apple_arch(conanfile.settings.get_safe("arch"))
+ self.apple_arch_flag = ["-arch", arch] if arch else []
+ self.apple_isysroot_flag = ["-isysroot", sdk_path] if sdk_path else []
+ self.apple_min_version_flag = [apple_min_version_flag(conanfile)]
+
+ def _get_extra_flags(self):
+ # Now, it's time to get all the flags defined by the user
+ cxxflags = self._conanfile.conf.get("tools.build:cxxflags", default=[], check_type=list)
+ cflags = self._conanfile.conf.get("tools.build:cflags", default=[], check_type=list)
+ ldflags = self._conanfile.conf.get("tools.build:ldflags", default=[], check_type=list)
+ return {
+ "cxxflags": cxxflags,
+ "cflags": cflags,
+ "ldflags": ldflags
+ }
+
+ @staticmethod
+ def _get_env_list(v):
+ # FIXME: Should Environment have the "check_type=None" keyword as Conf?
+ return v.strip().split() if not isinstance(v, list) else v
+
+ @staticmethod
+ def _filter_list_empty_fields(v):
+ return list(filter(bool, v))
def _context(self):
+ apple_flags = self.apple_isysroot_flag + self.apple_arch_flag + self.apple_min_version_flag
+ extra_flags = self._get_extra_flags()
+
+ self.c_args.extend(apple_flags + extra_flags["cflags"])
+ self.cpp_args.extend(apple_flags + extra_flags["cxxflags"])
+ self.c_link_args.extend(apple_flags + extra_flags["ldflags"])
+ self.cpp_link_args.extend(apple_flags + extra_flags["ldflags"])
+
return {
# https://mesonbuild.com/Machine-files.html#project-specific-options
"project_options": {k: to_meson_value(v) for k, v in self.project_options.items()},
@@ -215,10 +228,10 @@ def _context(self):
"b_ndebug": to_meson_value(self._b_ndebug), # boolean as string
# https://mesonbuild.com/Builtin-options.html#compiler-options
"cpp_std": self._cpp_std,
- "c_args": to_meson_value(self.c_args.strip().split()),
- "c_link_args": to_meson_value(self.c_link_args.strip().split()),
- "cpp_args": to_meson_value(self.cpp_args.strip().split()),
- "cpp_link_args": to_meson_value(self.cpp_link_args.strip().split()),
+ "c_args": to_meson_value(self._filter_list_empty_fields(self.c_args)),
+ "c_link_args": to_meson_value(self._filter_list_empty_fields(self.c_link_args)),
+ "cpp_args": to_meson_value(self._filter_list_empty_fields(self.cpp_args)),
+ "cpp_link_args": to_meson_value(self._filter_list_empty_fields(self.cpp_link_args)),
"pkg_config_path": self.pkg_config_path,
"preprocessor_definitions": self.preprocessor_definitions,
"cross_build": self.cross_build
diff --git a/conans/model/conf.py b/conans/model/conf.py
index 5d0a381e857..e60ca3f3d41 100644
--- a/conans/model/conf.py
+++ b/conans/model/conf.py
@@ -39,7 +39,14 @@
"tools.system.package_manager:mode": "Mode for package_manager tools: 'check' or 'install'",
"tools.system.package_manager:sudo": "Use 'sudo' when invoking the package manager tools in Linux (False by default)",
"tools.system.package_manager:sudo_askpass": "Use the '-A' argument if using sudo in Linux to invoke the system package manager (False by default)",
- "tools.apple.xcodebuild:verbosity": "Verbosity level for xcodebuild: 'verbose' or 'quiet"
+ "tools.apple.xcodebuild:verbosity": "Verbosity level for xcodebuild: 'verbose' or 'quiet",
+ # Flags configuration
+ "tools.build:cxxflags": "List of extra CXX flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
+ "tools.build:cflags": "List of extra C flags used by different toolchains like CMakeToolchain, AutotoolsToolchain and MesonToolchain",
+ "tools.build:cppflags": "List of extra CPP flags used by different toolchains like AutotoolsToolchain and MesonToolchain",
+ "tools.build:ldflags": "List of extra LD flags used by different toolchains like AutotoolsToolchain and MesonToolchain",
+ "tools.build:sharedlinkflags": "List of extra flags used by CMakeToolchain for CMAKE_SHARED_LINKER_FLAGS_INIT variable",
+ "tools.build:exelinkflags": "List of extra flags used by CMakeToolchain for CMAKE_EXE_LINKER_FLAGS_INIT variable",
}
@@ -228,6 +235,8 @@ def get(self, conf_name, default=None, check_type=None):
return self._get_boolean_value(v)
elif check_type is str and not isinstance(v, str):
return str(v)
+ elif v is None: # value was unset
+ return default
elif check_type is not None and not isinstance(v, check_type):
raise ConanException("[conf] {name} must be a {type}-like object. "
"The value '{value}' introduced is a {vtype} "
| diff --git a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
index dad51dde5c2..6aa43d55318 100644
--- a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
+++ b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
@@ -229,3 +229,34 @@ def generate(self):
with open(os.path.join(client.current_folder, "conan_toolchain.cmake")) as f:
contents = f.read()
assert "/path/to/builddir" in contents
+
+
+def test_extra_flags_via_conf():
+ profile = textwrap.dedent("""
+ [settings]
+ os=Linux
+ compiler=gcc
+ compiler.version=6
+ compiler.libcxx=libstdc++11
+ arch=armv8
+ build_type=Release
+
+ [conf]
+ tools.build:cxxflags=["--flag1", "--flag2"]
+ tools.build:cflags+=["--flag3", "--flag4"]
+ tools.build:sharedlinkflags=+["--flag5", "--flag6"]
+ tools.build:exelinkflags=["--flag7", "--flag8"]
+ """)
+
+ client = TestClient(path_with_spaces=False)
+
+ conanfile = GenConanfile().with_settings("os", "arch", "compiler", "build_type")\
+ .with_generator("CMakeToolchain")
+ client.save({"conanfile.py": conanfile,
+ "profile": profile})
+ client.run("install . --profile:build=profile --profile:host=profile")
+ toolchain = client.load("conan_toolchain.cmake")
+ assert 'string(APPEND CONAN_CXX_FLAGS " --flag1 --flag2")' in toolchain
+ assert 'string(APPEND CONAN_C_FLAGS " --flag3 --flag4")' in toolchain
+ assert 'string(APPEND CONAN_SHARED_LINKER_FLAGS " --flag5 --flag6")' in toolchain
+ assert 'string(APPEND CONAN_EXE_LINKER_FLAGS " --flag7 --flag8")' in toolchain
diff --git a/conans/test/integration/toolchains/gnu/__init__.py b/conans/test/integration/toolchains/gnu/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/conans/test/integration/toolchains/gnu/test_autotoolstoolchain.py b/conans/test/integration/toolchains/gnu/test_autotoolstoolchain.py
new file mode 100644
index 00000000000..eca88af2f8c
--- /dev/null
+++ b/conans/test/integration/toolchains/gnu/test_autotoolstoolchain.py
@@ -0,0 +1,43 @@
+import platform
+import textwrap
+
+from conans.test.assets.genconanfile import GenConanfile
+from conans.test.utils.tools import TestClient
+
+
+def test_extra_flags_via_conf():
+ os_ = platform.system()
+ os_ = "Macos" if os_ == "Darwin" else os_
+
+ profile = textwrap.dedent("""
+ [settings]
+ os=%s
+ compiler=gcc
+ compiler.version=6
+ compiler.libcxx=libstdc++11
+ arch=armv8
+ build_type=Release
+
+ [conf]
+ tools.build:cxxflags=["--flag1", "--flag2"]
+ tools.build:cflags+=["--flag3", "--flag4"]
+ tools.build:ldflags+=["--flag5", "--flag6"]
+ tools.build:cppflags+=["DEF1", "DEF2"]
+ """ % os_)
+ client = TestClient()
+ conanfile = GenConanfile().with_settings("os", "arch", "compiler", "build_type")\
+ .with_generator("AutotoolsToolchain")
+ client.save({"conanfile.py": conanfile,
+ "profile": profile})
+ client.run("install . --profile:build=profile --profile:host=profile")
+ toolchain = client.load("conanautotoolstoolchain{}".format('.bat' if os_ == "Windows" else '.sh'))
+ if os_ == "Windows":
+ assert 'set "CPPFLAGS=%CPPFLAGS% -DNDEBUG -DDEF1 -DDEF2"' in toolchain
+ assert 'set "CXXFLAGS=%CXXFLAGS% -O3 -s --flag1 --flag2"' in toolchain
+ assert 'set "CFLAGS=%CFLAGS% -O3 -s --flag3 --flag4"' in toolchain
+ assert 'set "LDFLAGS=%LDFLAGS% --flag5 --flag6"' in toolchain
+ else:
+ assert 'export CPPFLAGS="$CPPFLAGS -DNDEBUG -DDEF1 -DDEF2"' in toolchain
+ assert 'export CXXFLAGS="$CXXFLAGS -O3 -s --flag1 --flag2"' in toolchain
+ assert 'export CFLAGS="$CFLAGS -O3 -s --flag3 --flag4"' in toolchain
+ assert 'export LDFLAGS="$LDFLAGS --flag5 --flag6"' in toolchain
diff --git a/conans/test/integration/toolchains/meson/test_mesontoolchain.py b/conans/test/integration/toolchains/meson/test_mesontoolchain.py
index 4f4ad7f2b08..1f31e46d69e 100644
--- a/conans/test/integration/toolchains/meson/test_mesontoolchain.py
+++ b/conans/test/integration/toolchains/meson/test_mesontoolchain.py
@@ -7,54 +7,8 @@
from conans.test.utils.tools import TestClient
-build_env_1 = textwrap.dedent("""
-CFLAGS=-mios-version-min=1 -isysroot ROOT1 -arch armv1
-CXXFLAGS=-mios-version-min=2 -isysroot ROOT2 -arch armv2
-LDFLAGS=-mios-version-min=3 -isysroot ROOT3 -arch armv3
-""")
-
-expected_args_1 = textwrap.dedent("""
-c_args = ['-mios-version-min=1', '-isysroot', 'ROOT1', '-arch', 'armv1'] + preprocessor_definitions
-c_link_args = ['-mios-version-min=3', '-isysroot', 'ROOT3', '-arch', 'armv3']
-cpp_args = ['-mios-version-min=2', '-isysroot', 'ROOT2', '-arch', 'armv2'] + preprocessor_definitions
-cpp_link_args = ['-mios-version-min=3', '-isysroot', 'ROOT3', '-arch', 'armv3']
-""")
-
-build_env_2 = textwrap.dedent("""
-CFLAGS=-isysroot ROOT1 -arch armv1
-CXXFLAGS=-mios-version-min=2 -arch armv2
-LDFLAGS=-mios-version-min=3 -isysroot ROOT3
-""")
-
-expected_args_2 = textwrap.dedent("""
-c_args = ['-isysroot', 'ROOT1', '-arch', 'armv1', '-mios-version-min=10.0'] + preprocessor_definitions
-c_link_args = ['-mios-version-min=3', '-isysroot', 'ROOT3', '-arch', 'arm64']
-cpp_args = ['-mios-version-min=2', '-arch', 'armv2', '-isysroot', '/my/sdk/path'] + preprocessor_definitions
-cpp_link_args = ['-mios-version-min=3', '-isysroot', 'ROOT3', '-arch', 'arm64']
-""")
-
-
-build_env_3 = textwrap.dedent("""
-CFLAGS=-flag1
-CXXFLAGS=-flag2
-LDFLAGS=-flag3
-""")
-
-expected_args_3 = textwrap.dedent("""
-c_args = ['-flag1', '-mios-version-min=10.0', '-isysroot', '/my/sdk/path', '-arch', 'arm64'] + preprocessor_definitions
-c_link_args = ['-flag3', '-mios-version-min=10.0', '-isysroot', '/my/sdk/path', '-arch', 'arm64']
-cpp_args = ['-flag2', '-mios-version-min=10.0', '-isysroot', '/my/sdk/path', '-arch', 'arm64'] + preprocessor_definitions
-cpp_link_args = ['-flag3', '-mios-version-min=10.0', '-isysroot', '/my/sdk/path', '-arch', 'arm64']
-""")
-
-
@pytest.mark.skipif(sys.version_info.major == 2, reason="Meson not supported in Py2")
-@pytest.mark.parametrize("build_env,expected_args", [
- (build_env_1, expected_args_1),
- (build_env_2, expected_args_2),
- (build_env_3, expected_args_3),
-])
-def test_apple_meson_keep_user_flags(build_env, expected_args):
+def test_apple_meson_keep_user_custom_flags():
default = textwrap.dedent("""
[settings]
os=Macos
@@ -75,12 +29,9 @@ def test_apple_meson_keep_user_flags(build_env, expected_args):
compiler.version = 12.0
compiler.libcxx = libc++
- [buildenv]
- {build_env}
-
[conf]
tools.apple:sdk_path=/my/sdk/path
- """.format(build_env=build_env))
+ """)
_conanfile_py = textwrap.dedent("""
from conan import ConanFile
@@ -91,6 +42,10 @@ class App(ConanFile):
def generate(self):
tc = MesonToolchain(self)
+ # Customized apple flags
+ tc.apple_arch_flag = ['-arch', 'myarch']
+ tc.apple_isysroot_flag = ['-isysroot', '/other/sdk/path']
+ tc.apple_min_version_flag = ['-otherminversion=10.7']
tc.generate()
""")
@@ -101,9 +56,47 @@ def generate(self):
t.run("install . -pr:h host_prof -pr:b build_prof")
content = t.load(MesonToolchain.cross_filename)
- assert expected_args in content
+ assert "c_args = ['-isysroot', '/other/sdk/path', '-arch', 'myarch', '-otherminversion=10.7']" in content
+ assert "c_link_args = ['-isysroot', '/other/sdk/path', '-arch', 'myarch', '-otherminversion=10.7']" in content
+ assert "cpp_args = ['-isysroot', '/other/sdk/path', '-arch', 'myarch', '-otherminversion=10.7']" in content
+ assert "cpp_link_args = ['-isysroot', '/other/sdk/path', '-arch', 'myarch', '-otherminversion=10.7']" in content
+@pytest.mark.skipif(sys.version_info.major == 2, reason="Meson not supported in Py2")
+def test_extra_flags_via_conf():
+ profile = textwrap.dedent("""
+ [settings]
+ os=Windows
+ arch=x86_64
+ compiler=gcc
+ compiler.version=9
+ compiler.cppstd=17
+ compiler.libcxx=libstdc++11
+ build_type=Release
+
+ [buildenv]
+ CFLAGS=-flag0 -other=val
+ CXXFLAGS=-flag0 -other=val
+ LDFLAGS=-flag0 -other=val
+
+ [conf]
+ tools.build:cxxflags=["-flag1", "-flag2"]
+ tools.build:cflags=["-flag3", "-flag4"]
+ tools.build:ldflags=["-flag5", "-flag6"]
+ """)
+ t = TestClient()
+ t.save({"conanfile.txt": "[generators]\nMesonToolchain",
+ "profile": profile})
+
+ t.run("install . -pr=profile")
+ content = t.load(MesonToolchain.native_filename)
+ assert "cpp_args = ['-flag0', '-other=val', '-flag1', '-flag2']" in content
+ assert "c_args = ['-flag0', '-other=val', '-flag3', '-flag4']" in content
+ assert "c_link_args = ['-flag0', '-other=val', '-flag5', '-flag6']" in content
+ assert "cpp_link_args = ['-flag0', '-other=val', '-flag5', '-flag6']" in content
+
+
+@pytest.mark.skipif(sys.version_info.major == 2, reason="Meson not supported in Py2")
def test_correct_quotes():
profile = textwrap.dedent("""
[settings]
diff --git a/conans/test/unittests/client/toolchain/autotools/autotools_toolchain_test.py b/conans/test/unittests/client/toolchain/autotools/autotools_toolchain_test.py
index 9b459a447df..e31401076c7 100644
--- a/conans/test/unittests/client/toolchain/autotools/autotools_toolchain_test.py
+++ b/conans/test/unittests/client/toolchain/autotools/autotools_toolchain_test.py
@@ -258,7 +258,7 @@ def test_build_type_flag(compiler):
def test_apple_arch_flag():
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings_build = MockSettings(
{"build_type": "Debug",
"os": "Macos",
@@ -290,7 +290,7 @@ def test_apple_arch_flag():
def test_apple_min_os_flag():
"""Even when no cross building it is adjusted because it could target a Mac version"""
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings = MockSettings(
{"build_type": "Debug",
"os": "Macos",
@@ -308,7 +308,7 @@ def test_apple_min_os_flag():
def test_apple_isysrootflag():
"""Even when no cross building it is adjusted because it could target a Mac version"""
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings_build = MockSettings(
{"build_type": "Debug",
"os": "Macos",
@@ -339,7 +339,7 @@ def test_apple_isysrootflag():
def test_custom_defines():
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings = MockSettings(
{"build_type": "RelWithDebInfo",
"os": "iOS",
@@ -355,7 +355,7 @@ def test_custom_defines():
def test_custom_cxxflags():
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings = MockSettings(
{"build_type": "RelWithDebInfo",
"os": "iOS",
@@ -374,7 +374,7 @@ def test_custom_cxxflags():
def test_custom_cflags():
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings = MockSettings(
{"build_type": "RelWithDebInfo",
"os": "iOS",
@@ -393,7 +393,7 @@ def test_custom_cflags():
def test_custom_ldflags():
conanfile = ConanFileMock()
- conanfile.conf = {"tools.apple:sdk_path": "/path/to/sdk"}
+ conanfile.conf.define("tools.apple:sdk_path", "/path/to/sdk")
conanfile.settings = MockSettings(
{"build_type": "RelWithDebInfo",
"os": "iOS",
@@ -408,3 +408,22 @@ def test_custom_ldflags():
assert "MyFlag" not in env["CXXFLAGS"]
assert "MyFlag" not in env["CFLAGS"]
+
+
+def test_extra_flags_via_conf():
+ conanfile = ConanFileMock()
+ conanfile.conf.define("tools.build:cxxflags", ["--flag1", "--flag2"])
+ conanfile.conf.define("tools.build:cflags", ["--flag3", "--flag4"])
+ conanfile.conf.define("tools.build:ldflags", ["--flag5", "--flag6"])
+ conanfile.conf.define("tools.build:cppflags", ["DEF1", "DEF2"])
+ conanfile.settings = MockSettings(
+ {"build_type": "RelWithDebInfo",
+ "os": "iOS",
+ "os.version": "14",
+ "arch": "armv8"})
+ be = AutotoolsToolchain(conanfile)
+ env = be.vars()
+ assert '-DNDEBUG -DDEF1 -DDEF2' in env["CPPFLAGS"]
+ assert '-mios-version-min=14 --flag1 --flag2' in env["CXXFLAGS"]
+ assert '-mios-version-min=14 --flag3 --flag4' in env["CFLAGS"]
+ assert '-mios-version-min=14 --flag5 --flag6' in env["LDFLAGS"]
diff --git a/conans/test/unittests/model/test_conf.py b/conans/test/unittests/model/test_conf.py
index 1bb50e1f337..ed11f9761a3 100644
--- a/conans/test/unittests/model/test_conf.py
+++ b/conans/test/unittests/model/test_conf.py
@@ -212,6 +212,8 @@ def test_conf_get_check_type_and_default():
# Check type does not affect to default value
assert c.get("non:existing:conf", default=0, check_type=dict) == 0
assert c.get("zlib:user.company.check:shared") is None # unset value
+ assert c.get("zlib:user.company.check:shared", default=[]) == [] # returning default
+ assert c.get("zlib:user.company.check:shared", default=[], check_type=list) == [] # not raising exception
assert c.get("zlib:user.company.check:shared_str") == '"False"'
assert c.get("zlib:user.company.check:shared_str", check_type=bool) is False # smart conversion
assert c.get("zlib:user.company.check:static_str") == "off"
| [
{
"components": [
{
"doc": "This block is adding flags directly from user [conf] section",
"lines": [
550,
578
],
"name": "ExtraFlagsBlock",
"signature": "class ExtraFlagsBlock(Block):",
"type": "class"
},
{
"doc": "",... | [
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_extra_flags_via_conf",
"conans/test/integration/toolchains/gnu/test_autotoolstoolchain.py::test_extra_flags_via_conf",
"conans/test/integration/toolchains/meson/test_mesontoolchain.py::test_apple_meson_keep_user_custom_flags",
"conans/test... | [
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build_user_toolchain",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_no_cross_build",
"conans/test/integration/toolchains/c... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[conf] Global flags management
Changelog: Feature: Added mechanism to inject extra flags via `[conf]` into several toolchains like `AutotoolsToolchain`, `MesonToolchain` and `CMakeToolchain`.
* `tools.build:cxxflags`
* `tools.build:cflags`
* ~`tools.build:cppflags`~ -> Changed to `tools.build:defines` (https://github.com/conan-io/conan/pull/10928)
* ~`tools.build:ldflags`~ -> Removed (https://github.com/conan-io/conan/pull/10928)
* `tools.build:sharedlinkflags`
* `tools.build:exelinkflags`
Changelog: Fix: `Conf.get()` always returns `default` value if internal `conf_value.value` is `None`, i.e., it was unset.
Docs: https://github.com/conan-io/docs/pull/2484
Closes: https://github.com/conan-io/conan/issues/10805 https://github.com/conan-io/conan/issues/10452 https://github.com/conan-io/conan/issues/10895
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/cmake/toolchain/blocks.py]
(definition of ExtraFlagsBlock:)
class ExtraFlagsBlock(Block):
"""This block is adding flags directly from user [conf] section"""
(definition of ExtraFlagsBlock.context:)
def context(self):
[end of new definitions in conan/tools/cmake/toolchain/blocks.py]
[start of new definitions in conan/tools/gnu/autotoolstoolchain.py]
(definition of AutotoolsToolchain._get_cxx11_abi_define:)
def _get_cxx11_abi_define(self):
(definition of AutotoolsToolchain._get_libcxx_flag:)
def _get_libcxx_flag(self):
(definition of AutotoolsToolchain._filter_list_empty_fields:)
def _filter_list_empty_fields(v):
(definition of AutotoolsToolchain._get_extra_flags:)
def _get_extra_flags(self):
[end of new definitions in conan/tools/gnu/autotoolstoolchain.py]
[start of new definitions in conan/tools/meson/toolchain.py]
(definition of MesonToolchain._resolve_apple_flags:)
def _resolve_apple_flags(self):
(definition of MesonToolchain._get_extra_flags:)
def _get_extra_flags(self):
(definition of MesonToolchain._get_env_list:)
def _get_env_list(v):
(definition of MesonToolchain._filter_list_empty_fields:)
def _filter_list_empty_fields(v):
[end of new definitions in conan/tools/meson/toolchain.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-10743 | 10,743 | conan-io/conan | null | 31a5e7c78e980719be3978595abc5e33fe9c127f | 2022-03-09T11:59:47Z | diff --git a/conan/tools/env/environment.py b/conan/tools/env/environment.py
index 245aa44db87..f7bc478b727 100644
--- a/conan/tools/env/environment.py
+++ b/conan/tools/env/environment.py
@@ -4,7 +4,7 @@
from collections import OrderedDict
from contextlib import contextmanager
-from conan.tools.microsoft.subsystems import deduce_subsystem, WINDOWS
+from conans.client.subsystems import deduce_subsystem, WINDOWS, subsystem_path
from conans.errors import ConanException
from conans.util.files import save
@@ -14,7 +14,6 @@ class _EnvVarPlaceHolder:
def environment_wrap_command(env_filenames, cmd, subsystem=None, cwd=None):
- from conan.tools.microsoft.subsystems import subsystem_path
assert env_filenames
filenames = [env_filenames] if not isinstance(env_filenames, list) else env_filenames
bats, shs = [], []
@@ -131,7 +130,6 @@ def get_str(self, placeholder, subsystem, pathsep):
values.append(placeholder.format(name=self._name))
else:
if self._path:
- from conan.tools.microsoft.subsystems import subsystem_path
v = subsystem_path(subsystem, v)
values.append(v)
if self._path:
diff --git a/conan/tools/gnu/autotools.py b/conan/tools/gnu/autotools.py
index 8308cc8330a..8776e6dd705 100644
--- a/conan/tools/gnu/autotools.py
+++ b/conan/tools/gnu/autotools.py
@@ -2,7 +2,7 @@
from conan.tools.build import build_jobs
from conan.tools.files.files import load_toolchain_args
-from conan.tools.microsoft.subsystems import subsystem_path, deduce_subsystem
+from conans.client.subsystems import subsystem_path, deduce_subsystem
from conans.client.build import join_arguments
diff --git a/conan/tools/gnu/gnudeps_flags.py b/conan/tools/gnu/gnudeps_flags.py
index 520bd3dc967..53227e13c5b 100644
--- a/conan/tools/gnu/gnudeps_flags.py
+++ b/conan/tools/gnu/gnudeps_flags.py
@@ -5,7 +5,7 @@
from conan.tools.microsoft import is_msvc
from conan.tools.apple.apple import is_apple_os
-from conan.tools.microsoft.subsystems import subsystem_path, deduce_subsystem
+from conans.client.subsystems import subsystem_path, deduce_subsystem
class GnuDepsFlags(object):
diff --git a/conan/tools/microsoft/__init__.py b/conan/tools/microsoft/__init__.py
index e86a8b19dd3..7ddb614d6f0 100644
--- a/conan/tools/microsoft/__init__.py
+++ b/conan/tools/microsoft/__init__.py
@@ -2,5 +2,5 @@
from conan.tools.microsoft.msbuild import MSBuild
from conan.tools.microsoft.msbuilddeps import MSBuildDeps
from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc, is_msvc_static_runtime
-from conan.tools.microsoft.subsystems import subsystem_path
from conan.tools.microsoft.layout import vs_layout
+from conan.tools.microsoft.subsystems import unix_path
diff --git a/conan/tools/microsoft/subsystems.py b/conan/tools/microsoft/subsystems.py
index d75a9ba62f1..651b9cb69b5 100644
--- a/conan/tools/microsoft/subsystems.py
+++ b/conan/tools/microsoft/subsystems.py
@@ -1,174 +1,8 @@
-import os
-import platform
-import re
-import subprocess
+from conans.client.subsystems import deduce_subsystem, subsystem_path
-from conans.errors import ConanException
-WINDOWS = "windows"
-MSYS2 = 'msys2'
-MSYS = 'msys'
-CYGWIN = 'cygwin'
-WSL = 'wsl' # Windows Subsystem for Linux
-SFU = 'sfu' # Windows Services for UNIX
-
-
-def run_in_windows_bash(conanfile, command, cwd=None, env=None):
- from conan.tools.env import Environment
- from conan.tools.env.environment import environment_wrap_command
- """ Will run a unix command inside a bash terminal It requires to have MSYS2, CYGWIN, or WSL"""
- if env:
- # Passing env invalidates the conanfile.environment_scripts
- env_win = [env] if not isinstance(env, list) else env
- env_shell = []
- else:
- env_shell = ["conanbuild.sh"]
- env_win = ["conanbuild.bat"]
-
- subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
- shell_path = conanfile.conf.get("tools.microsoft.bash:path")
-
- if not platform.system() == "Windows":
- raise ConanException("Command only for Windows operating system")
-
- if not subsystem or not shell_path:
- raise ConanException("The config 'tools.microsoft.bash:subsystem' and 'tools.microsoft.bash:path' are "
- "needed to run commands in a Windows subsystem")
- if subsystem == MSYS2:
- # Configure MSYS2 to inherith the PATH
- msys2_mode_env = Environment()
- _msystem = {"x86": "MINGW32"}.get(conanfile.settings.get_safe("arch"), "MINGW64")
- msys2_mode_env.define("MSYSTEM", _msystem)
- msys2_mode_env.define("MSYS2_PATH_TYPE", "inherit")
- path = os.path.join(conanfile.generators_folder, "msys2_mode.bat")
- msys2_mode_env.vars(conanfile, "build").save_bat(path)
- env_win.append(path)
-
- # Needed to change to that dir inside the bash shell
- wrapped_shell = '"%s"' % shell_path if " " in shell_path else shell_path
- if env_win:
- wrapped_shell = environment_wrap_command(env_win, shell_path,
- cwd=conanfile.generators_folder)
-
- cwd = cwd or os.getcwd()
- if not os.path.isabs(cwd):
- cwd = os.path.join(os.getcwd(), cwd)
- cwd_inside = subsystem_path(subsystem, cwd)
- wrapped_user_cmd = command
- if env_shell:
- # Wrapping the inside_command enable to prioritize our environment, otherwise /usr/bin go
- # first and there could be commands that we want to skip
- wrapped_user_cmd = environment_wrap_command(env_shell, command,
- cwd=conanfile.generators_folder)
- inside_command = 'cd "{cwd_inside}" && ' \
- '{wrapped_user_cmd}'.format(cwd_inside=cwd_inside,
- wrapped_user_cmd=wrapped_user_cmd)
-
- inside_command = escape_windows_cmd(inside_command)
-
- final_command = 'cd "{cwd}" && {wrapped_shell} --login -c {inside_command}'.format(
- cwd=cwd,
- wrapped_shell=wrapped_shell,
- inside_command=inside_command)
- conanfile.output.info('Running in windows bash: %s' % final_command)
- return conanfile._conan_runner(final_command, output=conanfile.output, subprocess=True)
-
-
-def escape_windows_cmd(command):
- """ To use in a regular windows cmd.exe
- 1. Adds escapes so the argument can be unpacked by CommandLineToArgvW()
- 2. Adds escapes for cmd.exe so the argument survives cmd.exe's substitutions.
-
- Useful to escape commands to be executed in a windows bash (msys2, cygwin etc)
- """
- quoted_arg = subprocess.list2cmdline([command])
- return "".join(["^%s" % arg if arg in r'()%!^"<>&|' else arg for arg in quoted_arg])
-
-
-def deduce_subsystem(conanfile, scope):
- if scope.startswith("build"):
- if hasattr(conanfile, "settings_build"):
- the_os = conanfile.settings_build.get_safe("os")
- subsystem = conanfile.settings_build.get_safe("os.subsystem")
- else:
- the_os = platform.system() # FIXME: Temporary fallback until 2.0
- subsystem = None
- else:
- the_os = conanfile.settings.get_safe("os")
- subsystem = conanfile.settings.get_safe("os.subsystem")
-
- if not str(the_os).startswith("Windows"):
- return None
-
- if subsystem is None and not scope.startswith("build"): # "run" scope do not follow win_bash
- return WINDOWS
-
- if subsystem is None: # Not defined by settings, so native windows
- if not conanfile.win_bash:
- return WINDOWS
-
- subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
- if not subsystem:
- raise ConanException("The config 'tools.microsoft.bash:subsystem' is "
- "needed to run commands in a Windows subsystem")
- return subsystem
-
-
-def subsystem_path(subsystem, path):
- """"Used to translate windows paths to MSYS unix paths like
- c/users/path/to/file. Not working in a regular console or MinGW!
- """
- if subsystem is None or subsystem == WINDOWS:
+def unix_path(conanfile, path):
+ if not conanfile.win_bash:
return path
-
- if os.path.exists(path):
- # if the path doesn't exist (and abs) we cannot guess the casing
- path = get_cased_path(path)
-
- if path.startswith('\\\\?\\'):
- path = path[4:]
- path = path.replace(":/", ":\\")
- append_prefix = re.match(r'[a-z]:\\', path, re.IGNORECASE)
- pattern = re.compile(r'([a-z]):\\', re.IGNORECASE)
- path = pattern.sub('/\\1/', path).replace('\\', '/')
-
- if append_prefix:
- if subsystem in (MSYS, MSYS2):
- return path.lower()
- elif subsystem == CYGWIN:
- return '/cygdrive' + path.lower()
- elif subsystem == WSL:
- return '/mnt' + path[0:2].lower() + path[2:]
- elif subsystem == SFU:
- path = path.lower()
- return '/dev/fs' + path[0] + path[1:].capitalize()
- else:
- return path if subsystem == WSL else path.lower()
- return None
-
-
-def get_cased_path(name):
- if platform.system() != "Windows":
- return name
- if not os.path.isabs(name):
- name = os.path.abspath(name)
-
- result = []
- current = name
- while True:
- parent, child = os.path.split(current)
- if parent == current:
- break
-
- child_cased = child
- if os.path.exists(parent):
- children = os.listdir(parent)
- for c in children:
- if c.upper() == child.upper():
- child_cased = c
- break
- result.append(child_cased)
- current = parent
- drive, _ = os.path.splitdrive(current)
- result.append(drive)
- return os.sep.join(reversed(result))
+ subsystem = deduce_subsystem(conanfile, scope="build")
+ return subsystem_path(subsystem, path)
diff --git a/conans/client/generators/__init__.py b/conans/client/generators/__init__.py
index 46629495587..526afd6ca27 100644
--- a/conans/client/generators/__init__.py
+++ b/conans/client/generators/__init__.py
@@ -3,11 +3,11 @@
from os.path import join
from conan.tools.env import VirtualRunEnv
-from conan.tools.microsoft.subsystems import deduce_subsystem
from conans.client.generators.cmake_find_package import CMakeFindPackageGenerator
from conans.client.generators.cmake_find_package_multi import CMakeFindPackageMultiGenerator
from conans.client.generators.compiler_args import CompilerArgsGenerator
from conans.client.generators.pkg_config import PkgConfigGenerator
+from conans.client.subsystems import deduce_subsystem, subsystem_path
from conans.errors import ConanException, conanfile_exception_formatter
from conans.util.env_reader import get_env
from conans.util.files import normalize, save, mkdir
@@ -264,7 +264,6 @@ def write_toolchain(conanfile, path, output):
def _generate_aggregated_env(conanfile):
- from conan.tools.microsoft.subsystems import subsystem_path
def deactivates(filenames):
# FIXME: Probably the order needs to be reversed
diff --git a/conans/client/subsystems.py b/conans/client/subsystems.py
new file mode 100644
index 00000000000..d75a9ba62f1
--- /dev/null
+++ b/conans/client/subsystems.py
@@ -0,0 +1,174 @@
+import os
+import platform
+import re
+import subprocess
+
+from conans.errors import ConanException
+
+WINDOWS = "windows"
+MSYS2 = 'msys2'
+MSYS = 'msys'
+CYGWIN = 'cygwin'
+WSL = 'wsl' # Windows Subsystem for Linux
+SFU = 'sfu' # Windows Services for UNIX
+
+
+def run_in_windows_bash(conanfile, command, cwd=None, env=None):
+ from conan.tools.env import Environment
+ from conan.tools.env.environment import environment_wrap_command
+ """ Will run a unix command inside a bash terminal It requires to have MSYS2, CYGWIN, or WSL"""
+ if env:
+ # Passing env invalidates the conanfile.environment_scripts
+ env_win = [env] if not isinstance(env, list) else env
+ env_shell = []
+ else:
+ env_shell = ["conanbuild.sh"]
+ env_win = ["conanbuild.bat"]
+
+ subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
+ shell_path = conanfile.conf.get("tools.microsoft.bash:path")
+
+ if not platform.system() == "Windows":
+ raise ConanException("Command only for Windows operating system")
+
+ if not subsystem or not shell_path:
+ raise ConanException("The config 'tools.microsoft.bash:subsystem' and 'tools.microsoft.bash:path' are "
+ "needed to run commands in a Windows subsystem")
+ if subsystem == MSYS2:
+ # Configure MSYS2 to inherith the PATH
+ msys2_mode_env = Environment()
+ _msystem = {"x86": "MINGW32"}.get(conanfile.settings.get_safe("arch"), "MINGW64")
+ msys2_mode_env.define("MSYSTEM", _msystem)
+ msys2_mode_env.define("MSYS2_PATH_TYPE", "inherit")
+ path = os.path.join(conanfile.generators_folder, "msys2_mode.bat")
+ msys2_mode_env.vars(conanfile, "build").save_bat(path)
+ env_win.append(path)
+
+ # Needed to change to that dir inside the bash shell
+ wrapped_shell = '"%s"' % shell_path if " " in shell_path else shell_path
+ if env_win:
+ wrapped_shell = environment_wrap_command(env_win, shell_path,
+ cwd=conanfile.generators_folder)
+
+ cwd = cwd or os.getcwd()
+ if not os.path.isabs(cwd):
+ cwd = os.path.join(os.getcwd(), cwd)
+ cwd_inside = subsystem_path(subsystem, cwd)
+ wrapped_user_cmd = command
+ if env_shell:
+ # Wrapping the inside_command enable to prioritize our environment, otherwise /usr/bin go
+ # first and there could be commands that we want to skip
+ wrapped_user_cmd = environment_wrap_command(env_shell, command,
+ cwd=conanfile.generators_folder)
+ inside_command = 'cd "{cwd_inside}" && ' \
+ '{wrapped_user_cmd}'.format(cwd_inside=cwd_inside,
+ wrapped_user_cmd=wrapped_user_cmd)
+
+ inside_command = escape_windows_cmd(inside_command)
+
+ final_command = 'cd "{cwd}" && {wrapped_shell} --login -c {inside_command}'.format(
+ cwd=cwd,
+ wrapped_shell=wrapped_shell,
+ inside_command=inside_command)
+ conanfile.output.info('Running in windows bash: %s' % final_command)
+ return conanfile._conan_runner(final_command, output=conanfile.output, subprocess=True)
+
+
+def escape_windows_cmd(command):
+ """ To use in a regular windows cmd.exe
+ 1. Adds escapes so the argument can be unpacked by CommandLineToArgvW()
+ 2. Adds escapes for cmd.exe so the argument survives cmd.exe's substitutions.
+
+ Useful to escape commands to be executed in a windows bash (msys2, cygwin etc)
+ """
+ quoted_arg = subprocess.list2cmdline([command])
+ return "".join(["^%s" % arg if arg in r'()%!^"<>&|' else arg for arg in quoted_arg])
+
+
+def deduce_subsystem(conanfile, scope):
+ if scope.startswith("build"):
+ if hasattr(conanfile, "settings_build"):
+ the_os = conanfile.settings_build.get_safe("os")
+ subsystem = conanfile.settings_build.get_safe("os.subsystem")
+ else:
+ the_os = platform.system() # FIXME: Temporary fallback until 2.0
+ subsystem = None
+ else:
+ the_os = conanfile.settings.get_safe("os")
+ subsystem = conanfile.settings.get_safe("os.subsystem")
+
+ if not str(the_os).startswith("Windows"):
+ return None
+
+ if subsystem is None and not scope.startswith("build"): # "run" scope do not follow win_bash
+ return WINDOWS
+
+ if subsystem is None: # Not defined by settings, so native windows
+ if not conanfile.win_bash:
+ return WINDOWS
+
+ subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
+ if not subsystem:
+ raise ConanException("The config 'tools.microsoft.bash:subsystem' is "
+ "needed to run commands in a Windows subsystem")
+ return subsystem
+
+
+def subsystem_path(subsystem, path):
+ """"Used to translate windows paths to MSYS unix paths like
+ c/users/path/to/file. Not working in a regular console or MinGW!
+ """
+ if subsystem is None or subsystem == WINDOWS:
+ return path
+
+ if os.path.exists(path):
+ # if the path doesn't exist (and abs) we cannot guess the casing
+ path = get_cased_path(path)
+
+ if path.startswith('\\\\?\\'):
+ path = path[4:]
+ path = path.replace(":/", ":\\")
+ append_prefix = re.match(r'[a-z]:\\', path, re.IGNORECASE)
+ pattern = re.compile(r'([a-z]):\\', re.IGNORECASE)
+ path = pattern.sub('/\\1/', path).replace('\\', '/')
+
+ if append_prefix:
+ if subsystem in (MSYS, MSYS2):
+ return path.lower()
+ elif subsystem == CYGWIN:
+ return '/cygdrive' + path.lower()
+ elif subsystem == WSL:
+ return '/mnt' + path[0:2].lower() + path[2:]
+ elif subsystem == SFU:
+ path = path.lower()
+ return '/dev/fs' + path[0] + path[1:].capitalize()
+ else:
+ return path if subsystem == WSL else path.lower()
+ return None
+
+
+def get_cased_path(name):
+ if platform.system() != "Windows":
+ return name
+ if not os.path.isabs(name):
+ name = os.path.abspath(name)
+
+ result = []
+ current = name
+ while True:
+ parent, child = os.path.split(current)
+ if parent == current:
+ break
+
+ child_cased = child
+ if os.path.exists(parent):
+ children = os.listdir(parent)
+ for c in children:
+ if c.upper() == child.upper():
+ child_cased = c
+ break
+ result.append(child_cased)
+ current = parent
+ drive, _ = os.path.splitdrive(current)
+ result.append(drive)
+ return os.sep.join(reversed(result))
diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py
index 84847abc73f..8939a223b99 100644
--- a/conans/model/conan_file.py
+++ b/conans/model/conan_file.py
@@ -385,7 +385,7 @@ def _run(cmd, _env):
return tools.run_in_windows_bash(self, bashcmd=cmd, cwd=cwd, subsystem=subsystem,
msys_mingw=msys_mingw, with_login=with_login)
elif self.win_bash: # New, Conan 2.0
- from conan.tools.microsoft.subsystems import run_in_windows_bash
+ from conans.client.subsystems import run_in_windows_bash
return run_in_windows_bash(self, command=cmd, cwd=cwd, env=_env)
if _env is None:
_env = "conanbuild"
| diff --git a/conans/test/unittests/tools/env/test_env.py b/conans/test/unittests/tools/env/test_env.py
index 0eeeef81821..4845b58d2c3 100644
--- a/conans/test/unittests/tools/env/test_env.py
+++ b/conans/test/unittests/tools/env/test_env.py
@@ -7,7 +7,7 @@
from conan.tools.env import Environment
from conan.tools.env.environment import ProfileEnvironment
-from conan.tools.microsoft.subsystems import WINDOWS
+from conans.client.subsystems import WINDOWS
from conans.client.tools import chdir, environment_append
from conans.test.utils.mocks import ConanFileMock, MockSettings
from conans.test.utils.test_files import temp_folder
diff --git a/conans/test/unittests/tools/microsoft/test_subsystem.py b/conans/test/unittests/tools/microsoft/test_subsystem.py
new file mode 100644
index 00000000000..07a43d329d1
--- /dev/null
+++ b/conans/test/unittests/tools/microsoft/test_subsystem.py
@@ -0,0 +1,31 @@
+import textwrap
+
+import pytest
+
+from conan.tools.microsoft import unix_path
+from conans.model.conf import ConfDefinition
+from conans.test.utils.mocks import MockSettings, ConanFileMock
+
+
+@pytest.mark.parametrize("subsystem, expected_path", [
+ ("msys2", '/c/path/to/stuff'),
+ ("msys", '/c/path/to/stuff'),
+ ("cygwin", '/cygdrive/c/path/to/stuff'),
+ ("wsl", '/mnt/c/path/to/stuff'),
+ ("sfu", '/dev/fs/C/path/to/stuff')
+])
+def test_unix_path(subsystem, expected_path):
+ c = ConfDefinition()
+ c.loads(textwrap.dedent("""\
+ tools.microsoft.bash:subsystem={}
+ """.format(subsystem)))
+
+ settings = MockSettings({"os": "Windows"})
+ conanfile = ConanFileMock()
+ conanfile.conf = c.get_conanfile_conf(None)
+ conanfile.settings = settings
+ conanfile.settings_build = settings
+ conanfile.win_bash = True
+
+ path = unix_path(conanfile, "c:/path/to/stuff")
+ assert expected_path == path
| [
{
"components": [
{
"doc": "",
"lines": [
4,
8
],
"name": "unix_path",
"signature": "def unix_path(conanfile, path):",
"type": "function"
}
],
"file": "conan/tools/microsoft/subsystems.py"
},
{
"components": [
... | [
"conans/test/unittests/tools/env/test_env.py::test_compose",
"conans/test/unittests/tools/env/test_env.py::test_define_append",
"conans/test/unittests/tools/env/test_env.py::test_compose_combinations[define-Val1-",
"conans/test/unittests/tools/env/test_env.py::test_compose_combinations[append-Val1-",
"conan... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Refactor subsystem code
Changelog: Feature: New `conan.tool.microsoft.unix_path` to convert paths to any subsystem when using `conanfile.win_bash`.
Changelog: Fix: Removed `subsystem_path` from the `conan.tool.microsoft` namespace, superseded by `unix_path`.
Docs: https://github.com/conan-io/docs/pull/2479
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/microsoft/subsystems.py]
(definition of unix_path:)
def unix_path(conanfile, path):
[end of new definitions in conan/tools/microsoft/subsystems.py]
[start of new definitions in conans/client/subsystems.py]
(definition of run_in_windows_bash:)
def run_in_windows_bash(conanfile, command, cwd=None, env=None):
(definition of escape_windows_cmd:)
def escape_windows_cmd(command):
"""To use in a regular windows cmd.exe
1. Adds escapes so the argument can be unpacked by CommandLineToArgvW()
2. Adds escapes for cmd.exe so the argument survives cmd.exe's substitutions.
Useful to escape commands to be executed in a windows bash (msys2, cygwin etc)"""
(definition of deduce_subsystem:)
def deduce_subsystem(conanfile, scope):
(definition of subsystem_path:)
def subsystem_path(subsystem, path):
""""Used to translate windows paths to MSYS unix paths like
c/users/path/to/file. Not working in a regular console or MinGW!"""
(definition of get_cased_path:)
def get_cased_path(name):
[end of new definitions in conans/client/subsystems.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
pallets__flask-4479 | 4,479 | pallets/flask | 2.1 | 425a62686f094de236a5a9eb1d6885de4730efa5 | 2022-03-08T21:46:37Z | diff --git a/CHANGES.rst b/CHANGES.rst
index e471d6a8a6..bab6690b9a 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -55,6 +55,10 @@ Unreleased
- From Werkzeug, for redirect responses the ``Location`` header URL
will remain relative, and exclude the scheme and domain, by default.
:pr:`4496`
+- Add ``Config.from_prefixed_env()`` to load config values from
+ environment variables that start with ``FLASK_`` or another prefix.
+ This parses values as JSON by default, and allows setting keys in
+ nested dicts. :pr:`4479`
Version 2.0.3
diff --git a/docs/config.rst b/docs/config.rst
index 0b86674d88..7a5e4da1e1 100644
--- a/docs/config.rst
+++ b/docs/config.rst
@@ -515,11 +515,14 @@ Or from a JSON file:
Configuring from Environment Variables
--------------------------------------
-In addition to pointing to configuration files using environment variables, you
-may find it useful (or necessary) to control your configuration values directly
-from the environment.
+In addition to pointing to configuration files using environment
+variables, you may find it useful (or necessary) to control your
+configuration values directly from the environment. Flask can be
+instructed to load all environment variables starting with a specific
+prefix into the config using :meth:`~flask.Config.from_prefixed_env`.
-Environment variables can be set in the shell before starting the server:
+Environment variables can be set in the shell before starting the
+server:
.. tabs::
@@ -527,8 +530,8 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- $ export SECRET_KEY="5f352379324c22463451387a0aec5d2f"
- $ export MAIL_ENABLED=false
+ $ export FLASK_SECRET_KEY="5f352379324c22463451387a0aec5d2f"
+ $ export FLASK_MAIL_ENABLED=false
$ flask run
* Running on http://127.0.0.1:5000/
@@ -536,8 +539,8 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- $ set -x SECRET_KEY "5f352379324c22463451387a0aec5d2f"
- $ set -x MAIL_ENABLED false
+ $ set -x FLASK_SECRET_KEY "5f352379324c22463451387a0aec5d2f"
+ $ set -x FLASK_MAIL_ENABLED false
$ flask run
* Running on http://127.0.0.1:5000/
@@ -545,8 +548,8 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- > set SECRET_KEY="5f352379324c22463451387a0aec5d2f"
- > set MAIL_ENABLED=false
+ > set FLASK_SECRET_KEY="5f352379324c22463451387a0aec5d2f"
+ > set FLASK_MAIL_ENABLED=false
> flask run
* Running on http://127.0.0.1:5000/
@@ -554,36 +557,51 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- > $env:SECRET_KEY = "5f352379324c22463451387a0aec5d2f"
- > $env:MAIL_ENABLED = "false"
+ > $env:FLASK_SECRET_KEY = "5f352379324c22463451387a0aec5d2f"
+ > $env:FLASK_MAIL_ENABLED = "false"
> flask run
* Running on http://127.0.0.1:5000/
-While this approach is straightforward to use, it is important to remember that
-environment variables are strings -- they are not automatically deserialized
-into Python types.
+The variables can then be loaded and accessed via the config with a key
+equal to the environment variable name without the prefix i.e.
-Here is an example of a configuration file that uses environment variables::
+.. code-block:: python
+
+ app.config.from_prefixed_env()
+ app.config["SECRET_KEY"] # Is "5f352379324c22463451387a0aec5d2f"
+
+The prefix is ``FLASK_`` by default. This is configurable via the
+``prefix`` argument of :meth:`~flask.Config.from_prefixed_env`.
- import os
+Values will be parsed to attempt to convert them to a more specific type
+than strings. By default :func:`json.loads` is used, so any valid JSON
+value is possible, including lists and dicts. This is configurable via
+the ``loads`` argument of :meth:`~flask.Config.from_prefixed_env`.
- _mail_enabled = os.environ.get("MAIL_ENABLED", default="true")
- MAIL_ENABLED = _mail_enabled.lower() in {"1", "t", "true"}
+When adding a boolean value with the default JSON parsing, only "true"
+and "false", lowercase, are valid values. Keep in mind that any
+non-empty string is considered ``True`` by Python.
- SECRET_KEY = os.environ.get("SECRET_KEY")
+It is possible to set keys in nested dictionaries by separating the
+keys with double underscore (``__``). Any intermediate keys that don't
+exist on the parent dict will be initialized to an empty dict.
- if not SECRET_KEY:
- raise ValueError("No SECRET_KEY set for Flask application")
+.. code-block:: text
+ $ export FLASK_MYAPI__credentials__username=user123
+
+.. code-block:: python
-Notice that any value besides an empty string will be interpreted as a boolean
-``True`` value in Python, which requires care if an environment explicitly sets
-values intended to be ``False``.
+ app.config["MYAPI"]["credentials"]["username"] # Is "user123"
-Make sure to load the configuration very early on, so that extensions have the
-ability to access the configuration when starting up. There are other methods
-on the config object as well to load from individual files. For a complete
-reference, read the :class:`~flask.Config` class documentation.
+On Windows, environment variable keys are always uppercase, therefore
+the above example would end up as ``MYAPI__CREDENTIALS__USERNAME``.
+
+For even more config loading features, including merging and
+case-insensitive Windows support, try a dedicated library such as
+Dynaconf_, which includes integration with Flask.
+
+.. _Dynaconf: https://www.dynaconf.com/
Configuration Best Practices
@@ -603,6 +621,10 @@ that experience:
limit yourself to request-only accesses to the configuration you can
reconfigure the object later on as needed.
+3. Make sure to load the configuration very early on, so that
+ extensions can access the configuration when calling ``init_app``.
+
+
.. _config-dev-prod:
Development / Production
diff --git a/src/flask/config.py b/src/flask/config.py
index 9657edc80a..a266ea1d0a 100644
--- a/src/flask/config.py
+++ b/src/flask/config.py
@@ -1,4 +1,5 @@
import errno
+import json
import os
import types
import typing as t
@@ -6,6 +7,13 @@
from werkzeug.utils import import_string
+def _json_loads(raw: t.Union[str, bytes]) -> t.Any:
+ try:
+ return json.loads(raw)
+ except json.JSONDecodeError:
+ return raw
+
+
class ConfigAttribute:
"""Makes an attribute forward to the config"""
@@ -70,7 +78,7 @@ class Config(dict):
"""
def __init__(self, root_path: str, defaults: t.Optional[dict] = None) -> None:
- dict.__init__(self, defaults or {})
+ super().__init__(defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
@@ -97,6 +105,70 @@ def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
)
return self.from_pyfile(rv, silent=silent)
+ def from_prefixed_env(
+ self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads
+ ) -> bool:
+ """Load any environment variables that start with ``FLASK_``,
+ dropping the prefix from the env key for the config key. Values
+ are passed through a loading function to attempt to convert them
+ to more specific types than strings.
+
+ Keys are loaded in :func:`sorted` order.
+
+ The default loading function attempts to parse values as any
+ valid JSON type, including dicts and lists.
+
+ Specific items in nested dicts can be set by separating the
+ keys with double underscores (``__``). If an intermediate key
+ doesn't exist, it will be initialized to an empty dict.
+
+ :param prefix: Load env vars that start with this prefix,
+ separated with an underscore (``_``).
+ :param loads: Pass each string value to this function and use
+ the returned value as the config value. If any error is
+ raised it is ignored and the value remains a string. The
+ default is :func:`json.loads`.
+
+ .. versionadded:: 2.1
+ """
+ prefix = f"{prefix}_"
+ len_prefix = len(prefix)
+
+ for key in sorted(os.environ):
+ if not key.startswith(prefix):
+ continue
+
+ value = os.environ[key]
+
+ try:
+ value = loads(value)
+ except Exception:
+ # Keep the value as a string if loading failed.
+ pass
+
+ # Change to key.removeprefix(prefix) on Python >= 3.9.
+ key = key[len_prefix:]
+
+ if "__" not in key:
+ # A non-nested key, set directly.
+ self[key] = value
+ continue
+
+ # Traverse nested dictionaries with keys separated by "__".
+ current = self
+ *parts, tail = key.split("__")
+
+ for part in parts:
+ # If an intermediate dict does not exist, create it.
+ if part not in current:
+ current[part] = {}
+
+ current = current[part]
+
+ current[tail] = value
+
+ return True
+
def from_pyfile(self, filename: str, silent: bool = False) -> bool:
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
| diff --git a/tests/test_config.py b/tests/test_config.py
index a3cd3d25bd..bbe4f1e2d3 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -38,6 +38,68 @@ def test_config_from_file():
common_object_test(app)
+def test_from_prefixed_env(monkeypatch):
+ monkeypatch.setenv("FLASK_STRING", "value")
+ monkeypatch.setenv("FLASK_BOOL", "true")
+ monkeypatch.setenv("FLASK_INT", "1")
+ monkeypatch.setenv("FLASK_FLOAT", "1.2")
+ monkeypatch.setenv("FLASK_LIST", "[1, 2]")
+ monkeypatch.setenv("FLASK_DICT", '{"k": "v"}')
+ monkeypatch.setenv("NOT_FLASK_OTHER", "other")
+
+ app = flask.Flask(__name__)
+ app.config.from_prefixed_env()
+
+ assert app.config["STRING"] == "value"
+ assert app.config["BOOL"] is True
+ assert app.config["INT"] == 1
+ assert app.config["FLOAT"] == 1.2
+ assert app.config["LIST"] == [1, 2]
+ assert app.config["DICT"] == {"k": "v"}
+ assert "OTHER" not in app.config
+
+
+def test_from_prefixed_env_custom_prefix(monkeypatch):
+ monkeypatch.setenv("FLASK_A", "a")
+ monkeypatch.setenv("NOT_FLASK_A", "b")
+
+ app = flask.Flask(__name__)
+ app.config.from_prefixed_env("NOT_FLASK")
+
+ assert app.config["A"] == "b"
+
+
+def test_from_prefixed_env_nested(monkeypatch):
+ monkeypatch.setenv("FLASK_EXIST__ok", "other")
+ monkeypatch.setenv("FLASK_EXIST__inner__ik", "2")
+ monkeypatch.setenv("FLASK_EXIST__new__more", '{"k": false}')
+ monkeypatch.setenv("FLASK_NEW__K", "v")
+
+ app = flask.Flask(__name__)
+ app.config["EXIST"] = {"ok": "value", "flag": True, "inner": {"ik": 1}}
+ app.config.from_prefixed_env()
+
+ if os.name != "nt":
+ assert app.config["EXIST"] == {
+ "ok": "other",
+ "flag": True,
+ "inner": {"ik": 2},
+ "new": {"more": {"k": False}},
+ }
+ else:
+ # Windows env var keys are always uppercase.
+ assert app.config["EXIST"] == {
+ "ok": "value",
+ "OK": "other",
+ "flag": True,
+ "inner": {"ik": 1},
+ "INNER": {"IK": 2},
+ "NEW": {"MORE": {"k": False}},
+ }
+
+ assert app.config["NEW"] == {"K": "v"}
+
+
def test_config_from_mapping():
app = flask.Flask(__name__)
app.config.from_mapping({"SECRET_KEY": "config", "TEST_KEY": "foo"})
| diff --git a/CHANGES.rst b/CHANGES.rst
index e471d6a8a6..bab6690b9a 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -55,6 +55,10 @@ Unreleased
- From Werkzeug, for redirect responses the ``Location`` header URL
will remain relative, and exclude the scheme and domain, by default.
:pr:`4496`
+- Add ``Config.from_prefixed_env()`` to load config values from
+ environment variables that start with ``FLASK_`` or another prefix.
+ This parses values as JSON by default, and allows setting keys in
+ nested dicts. :pr:`4479`
Version 2.0.3
diff --git a/docs/config.rst b/docs/config.rst
index 0b86674d88..7a5e4da1e1 100644
--- a/docs/config.rst
+++ b/docs/config.rst
@@ -515,11 +515,14 @@ Or from a JSON file:
Configuring from Environment Variables
--------------------------------------
-In addition to pointing to configuration files using environment variables, you
-may find it useful (or necessary) to control your configuration values directly
-from the environment.
+In addition to pointing to configuration files using environment
+variables, you may find it useful (or necessary) to control your
+configuration values directly from the environment. Flask can be
+instructed to load all environment variables starting with a specific
+prefix into the config using :meth:`~flask.Config.from_prefixed_env`.
-Environment variables can be set in the shell before starting the server:
+Environment variables can be set in the shell before starting the
+server:
.. tabs::
@@ -527,8 +530,8 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- $ export SECRET_KEY="5f352379324c22463451387a0aec5d2f"
- $ export MAIL_ENABLED=false
+ $ export FLASK_SECRET_KEY="5f352379324c22463451387a0aec5d2f"
+ $ export FLASK_MAIL_ENABLED=false
$ flask run
* Running on http://127.0.0.1:5000/
@@ -536,8 +539,8 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- $ set -x SECRET_KEY "5f352379324c22463451387a0aec5d2f"
- $ set -x MAIL_ENABLED false
+ $ set -x FLASK_SECRET_KEY "5f352379324c22463451387a0aec5d2f"
+ $ set -x FLASK_MAIL_ENABLED false
$ flask run
* Running on http://127.0.0.1:5000/
@@ -545,8 +548,8 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- > set SECRET_KEY="5f352379324c22463451387a0aec5d2f"
- > set MAIL_ENABLED=false
+ > set FLASK_SECRET_KEY="5f352379324c22463451387a0aec5d2f"
+ > set FLASK_MAIL_ENABLED=false
> flask run
* Running on http://127.0.0.1:5000/
@@ -554,36 +557,51 @@ Environment variables can be set in the shell before starting the server:
.. code-block:: text
- > $env:SECRET_KEY = "5f352379324c22463451387a0aec5d2f"
- > $env:MAIL_ENABLED = "false"
+ > $env:FLASK_SECRET_KEY = "5f352379324c22463451387a0aec5d2f"
+ > $env:FLASK_MAIL_ENABLED = "false"
> flask run
* Running on http://127.0.0.1:5000/
-While this approach is straightforward to use, it is important to remember that
-environment variables are strings -- they are not automatically deserialized
-into Python types.
+The variables can then be loaded and accessed via the config with a key
+equal to the environment variable name without the prefix i.e.
-Here is an example of a configuration file that uses environment variables::
+.. code-block:: python
+
+ app.config.from_prefixed_env()
+ app.config["SECRET_KEY"] # Is "5f352379324c22463451387a0aec5d2f"
+
+The prefix is ``FLASK_`` by default. This is configurable via the
+``prefix`` argument of :meth:`~flask.Config.from_prefixed_env`.
- import os
+Values will be parsed to attempt to convert them to a more specific type
+than strings. By default :func:`json.loads` is used, so any valid JSON
+value is possible, including lists and dicts. This is configurable via
+the ``loads`` argument of :meth:`~flask.Config.from_prefixed_env`.
- _mail_enabled = os.environ.get("MAIL_ENABLED", default="true")
- MAIL_ENABLED = _mail_enabled.lower() in {"1", "t", "true"}
+When adding a boolean value with the default JSON parsing, only "true"
+and "false", lowercase, are valid values. Keep in mind that any
+non-empty string is considered ``True`` by Python.
- SECRET_KEY = os.environ.get("SECRET_KEY")
+It is possible to set keys in nested dictionaries by separating the
+keys with double underscore (``__``). Any intermediate keys that don't
+exist on the parent dict will be initialized to an empty dict.
- if not SECRET_KEY:
- raise ValueError("No SECRET_KEY set for Flask application")
+.. code-block:: text
+ $ export FLASK_MYAPI__credentials__username=user123
+
+.. code-block:: python
-Notice that any value besides an empty string will be interpreted as a boolean
-``True`` value in Python, which requires care if an environment explicitly sets
-values intended to be ``False``.
+ app.config["MYAPI"]["credentials"]["username"] # Is "user123"
-Make sure to load the configuration very early on, so that extensions have the
-ability to access the configuration when starting up. There are other methods
-on the config object as well to load from individual files. For a complete
-reference, read the :class:`~flask.Config` class documentation.
+On Windows, environment variable keys are always uppercase, therefore
+the above example would end up as ``MYAPI__CREDENTIALS__USERNAME``.
+
+For even more config loading features, including merging and
+case-insensitive Windows support, try a dedicated library such as
+Dynaconf_, which includes integration with Flask.
+
+.. _Dynaconf: https://www.dynaconf.com/
Configuration Best Practices
@@ -603,6 +621,10 @@ that experience:
limit yourself to request-only accesses to the configuration you can
reconfigure the object later on as needed.
+3. Make sure to load the configuration very early on, so that
+ extensions can access the configuration when calling ``init_app``.
+
+
.. _config-dev-prod:
Development / Production
| [
{
"components": [
{
"doc": "",
"lines": [
10,
14
],
"name": "_json_loads",
"signature": "def _json_loads(raw: t.Union[str, bytes]) -> t.Any:",
"type": "function"
},
{
"doc": "Load any environment variables that start w... | [
"tests/test_config.py::test_from_prefixed_env",
"tests/test_config.py::test_from_prefixed_env_custom_prefix",
"tests/test_config.py::test_from_prefixed_env_nested"
] | [
"tests/test_config.py::test_config_from_pyfile",
"tests/test_config.py::test_config_from_object",
"tests/test_config.py::test_config_from_file",
"tests/test_config.py::test_config_from_mapping",
"tests/test_config.py::test_config_from_class",
"tests/test_config.py::test_config_from_envvar",
"tests/test_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Allow loading of environment variables into the config
This new method will pick out any environment variables with a certain
prefix and place them into the config named without the prefix. This
makes it easy to use environment variables to configure the app as is
now more popular than when Flask started.
The prefix should ensure that the environment isn't polluted and the
config isn't polluted by environment variables.
Checklist:
- [x] Add tests that demonstrate the correct behavior of the change. Tests should fail without the change.
- [x] Add or update relevant docs, in the docs folder and in code.
- [x] Add an entry in `CHANGES.rst` summarizing the change and linking to the issue.
- [x] Add `.. versionchanged::` entries in any relevant code docs.
- [x] Run `pre-commit` hooks and fix any issues.
- [x] Run `pytest` and `tox`, no tests failed.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/flask/config.py]
(definition of _json_loads:)
def _json_loads(raw: t.Union[str, bytes]) -> t.Any:
(definition of Config.from_prefixed_env:)
def from_prefixed_env( self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads ) -> bool:
"""Load any environment variables that start with ``FLASK_``,
dropping the prefix from the env key for the config key. Values
are passed through a loading function to attempt to convert them
to more specific types than strings.
Keys are loaded in :func:`sorted` order.
The default loading function attempts to parse values as any
valid JSON type, including dicts and lists.
Specific items in nested dicts can be set by separating the
keys with double underscores (``__``). If an intermediate key
doesn't exist, it will be initialized to an empty dict.
:param prefix: Load env vars that start with this prefix,
separated with an underscore (``_``).
:param loads: Pass each string value to this function and use
the returned value as the config value. If any error is
raised it is ignored and the value remains a string. The
default is :func:`json.loads`.
.. versionadded:: 2.1"""
[end of new definitions in src/flask/config.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 425a62686f094de236a5a9eb1d6885de4730efa5 | |
huggingface__datasets-3862 | 3,862 | huggingface/datasets | null | 7085abb8526c3347fb291990d0c4d79e746985ee | 2022-03-08T14:53:57Z | diff --git a/docs/source/stream.mdx b/docs/source/stream.mdx
index 7a27ec8e03b..d08745a618b 100644
--- a/docs/source/stream.mdx
+++ b/docs/source/stream.mdx
@@ -91,12 +91,11 @@ You can split your dataset one of two ways:
```py
>>> from datasets import interleave_datasets
->>> from itertools import islice
>>> en_dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> fr_dataset = load_dataset('oscar', "unshuffled_deduplicated_fr", split='train', streaming=True)
>>> multilingual_dataset = interleave_datasets([en_dataset, fr_dataset])
->>> print(list(islice(multilingual_dataset, 2)))
+>>> list(multilingual_dataset.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': "Média de débat d'idées, de culture et de littérature..."}]
```
@@ -104,15 +103,31 @@ Define sampling probabilities from each of the original datasets for more contro
```py
>>> multilingual_dataset_with_oversampling = interleave_datasets([en_dataset, fr_dataset], probabilities=[0.8, 0.2], seed=42)
->>> print(list(islice(multilingual_dataset_with_oversampling, 2)))
+>>> list(multilingual_dataset_with_oversampling.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': 'Lily James cannot fight the music...'}]
```
Around 80% of the final dataset is made of the `en_dataset`, and 20% of the `fr_dataset`.
-## Remove
+## Rename, remove, and cast
-Remove columns on-the-fly with [`datasets.IterableDataset.remove_columns`]. Specify the name of the column to remove:
+The following methods allow you to modify the columns of a dataset. These methods are useful for renaming or removing columns and changing columns to a new set of features.
+
+### Rename
+
+Use [`datasets.IterableDataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
+
+Provide [`datasets.IterableDataset.rename_column`] with the name of the original column, and the new column name:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train')
+>>> dataset = dataset.rename_column("text", "content")
+```
+
+### Remove
+
+When you need to remove one or more columns, give [`datasets.IterableDataset.remove_columns`] the name of the column to remove. Remove more than one column by providing a list of column names:
```py
>>> from datasets import load_dataset
@@ -120,6 +135,47 @@ Remove columns on-the-fly with [`datasets.IterableDataset.remove_columns`]. Spec
>>> dataset = dataset.remove_columns('timestamp')
```
+### Cast
+
+[`datasets.IterableDataset.cast`] changes the feature type of one or more columns. This method takes your new `datasets.Features` as its argument. The following sample code shows how to change the feature types of `datasets.ClassLabel` and `datasets.Value`:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('glue', 'mrpc', split='train')features
+{'sentence1': Value(dtype='string', id=None),
+'sentence2': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
+'idx': Value(dtype='int32', id=None)}
+
+>>> from datasets import ClassLabel, Value
+>>> new_features = dataset.features.copy()
+>>> new_features["label"] = ClassLabel(names=['negative', 'positive'])
+>>> new_features["idx"] = Value('int64')
+>>> dataset = dataset.cast(new_features)
+>>> dataset.features
+{'sentence1': Value(dtype='string', id=None),
+'sentence2': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None),
+'idx': Value(dtype='int64', id=None)}
+```
+
+<Tip>
+
+Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value('int32')` to `Value('bool')` if the original column only contains ones and zeros.
+
+</Tip>
+
+Use [`datasets.Dataset.cast_column`] to change the feature type of just one column. Pass the column name and its new feature type as arguments:
+
+```py
+>>> dataset.features
+{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
+
+>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
+>>> dataset.features
+{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
+```
+
## Map
Similar to the [`datasets.Dataset.map`] function for a regular [`datasets.Dataset`], 🤗 Datasets features [`datasets.IterableDataset.map`] for processing [`datasets.IterableDataset`]\s.
diff --git a/src/datasets/iterable_dataset.py b/src/datasets/iterable_dataset.py
index b255f9d7c79..108f59e1397 100644
--- a/src/datasets/iterable_dataset.py
+++ b/src/datasets/iterable_dataset.py
@@ -455,7 +455,7 @@ def with_format(
# TODO(QL): add pandas, numpy and tf formats
return iterable_dataset(
ex_iterable=self._ex_iterable,
- info=copy.deepcopy(self._info),
+ info=self._info.copy(),
split=self._split,
format_type=type,
shuffling=copy.deepcopy(self._shuffling),
@@ -463,7 +463,7 @@ def with_format(
def map(
self,
- function: Callable,
+ function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
batched: bool = False,
@@ -486,8 +486,15 @@ def map(
A batch is a dictionary, e.g. a batch of ``n`` examples is {"text": ["Hello there !"] * n}
Args:
- function (:obj:`Callable`, optional, default None): if not None, this function is applied
- on-the-fly on the examples when you iterate on the dataset.
+ function (:obj:`Callable`, optional, default None): Function applied on-the-fly on the examples when you iterate on the dataset
+ It must have one of the following signatures:
+
+ - `function(example: Union[Dict, Any]) -> dict` if `batched=False` and `with_indices=False`
+ - `function(example: Union[Dict, Any], idx: int) -> dict` if `batched=False` and `with_indices=True`
+ - `function(batch: Union[Dict[List], List[Any]]) -> dict` if `batched=True` and `with_indices=False`
+ - `function(batch: Union[Dict[List], List[Any]], indices: List[int]) -> dict` if `batched=True` and `with_indices=True`
+
+ If no function is provided, default to identity function: ``lambda x: x``.
with_indices (:obj:`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
input_columns (`Optional[Union[str, List[str]]]`, default `None`): The columns to be passed into `function`
as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
@@ -501,7 +508,9 @@ def map(
input_columns = [input_columns]
if isinstance(remove_columns, str):
remove_columns = [remove_columns]
- info = copy.deepcopy(self._info)
+ if function is None:
+ function = lambda x: x # noqa: E731
+ info = self._info.copy()
info.features = None
ex_iterable = MappedExamplesIterable(
TypedExamplesIterable(self._ex_iterable, self._info.features)
@@ -557,7 +566,7 @@ def shuffle(
ex_iterable=BufferShuffledExamplesIterable(
self._ex_iterable, buffer_size=buffer_size, generator=generator
).shuffle_data_sources(generator),
- info=copy.deepcopy(self._info),
+ info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=shuffling,
@@ -576,7 +585,7 @@ def skip(self, n) -> "IterableDataset":
ex_iterable = SkipExamplesIterable(self._ex_iterable, n)
return iterable_dataset(
ex_iterable=ex_iterable,
- info=copy.deepcopy(self._info),
+ info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=copy.deepcopy(self._shuffling),
@@ -592,12 +601,84 @@ def take(self, n) -> "IterableDataset":
ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
return iterable_dataset(
ex_iterable=ex_iterable,
- info=copy.deepcopy(self._info),
+ info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=copy.deepcopy(self._shuffling),
)
+ def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
+ """Add column to Dataset.
+
+ Args:
+ name (str): Column name.
+ column (list or np.array): Column data to be added.
+
+ Returns:
+ :class:`IterableDataset`
+ """
+
+ def add_column_fn(example, idx):
+ if name in example:
+ raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
+ return {name: column[idx]}
+
+ return self.map(add_column_fn, with_indices=True)
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (:obj:`str`): Name of the column to rename.
+ new_column_name (:obj:`str`): New name for the column.
+
+ Returns:
+ :class:`IterableDataset`: A copy of the dataset with a renamed column.
+ """
+
+ def rename_column_fn(example):
+ if original_column_name not in example:
+ raise ValueError(
+ f"Error when renaming {original_column_name} to {new_column_name}: column {original_column_name} is not in the dataset."
+ )
+ if new_column_name in example:
+ raise ValueError(
+ f"Error when renaming {original_column_name} to {new_column_name}: column {new_column_name} is already in the dataset."
+ )
+ return {new_column_name: example[original_column_name]}
+
+ return self.map(rename_column_fn, remove_columns=[original_column_name])
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (:obj:`Dict[str, str]`): A mapping of columns to rename to their new names
+
+ Returns:
+ :class:`IterableDataset`: A copy of the dataset with renamed columns
+ """
+
+ def rename_columns_fn(example):
+ if any(col not in example for col in column_mapping):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
+ )
+ if any(col in example for col in column_mapping.values()):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset."
+ )
+ return {
+ new_column_name: example[original_column_name]
+ for original_column_name, new_column_name in column_mapping.items()
+ }
+
+ return self.map(rename_columns_fn, remove_columns=list(column_mapping))
+
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
"""
Remove one or several column(s) in the dataset and the features associated to them.
@@ -610,13 +691,7 @@ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatase
Returns:
:class:`IterableDataset`: A copy of the dataset object without the columns to remove.
"""
- if isinstance(column_names, str):
- column_names = [column_names]
-
- def remove_fn(example):
- return {k: v for k, v in example.items() if k not in column_names}
-
- return self.map(remove_fn)
+ return self.map(remove_columns=column_names)
def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
"""Cast column to feature for decoding.
@@ -628,8 +703,44 @@ def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
Returns:
:class:`IterableDataset`
"""
- info = copy.deepcopy(self._info)
+ info = self._info.copy()
info.features[column] = feature
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return iterable_dataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ format_type=self._format_type,
+ shuffling=copy.deepcopy(self._shuffling),
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features (:class:`datasets.Features`): New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset.
+
+ Returns:
+ :class:`IterableDataset`: A copy of the dataset with casted features.
+ """
+ info = self._info.copy()
+ info.features = features
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
return iterable_dataset(
ex_iterable=self._ex_iterable,
info=info,
| diff --git a/tests/test_iterable_dataset.py b/tests/test_iterable_dataset.py
index 3d619dbe9c4..51f93dca833 100644
--- a/tests/test_iterable_dataset.py
+++ b/tests/test_iterable_dataset.py
@@ -50,6 +50,15 @@ def dataset(generate_examples_fn):
return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
+@pytest.fixture
+def dataset_with_several_columns(generate_examples_fn):
+ ex_iterable = ExamplesIterable(
+ generate_examples_fn,
+ {"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}},
+ )
+ return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train")
+
+
################################
#
# _BaseExampleIterable tests
@@ -526,6 +535,40 @@ def test_iterable_dataset_shuffle_after_skip_or_take(generate_examples_fn, metho
assert sorted(dataset, key=key) == sorted(shuffled_dataset, key=key)
+def test_iterable_dataset_add_column(dataset_with_several_columns):
+ new_column = list(range(DEFAULT_N_EXAMPLES))
+ new_dataset = dataset_with_several_columns.add_column("new_column", new_column)
+ assert list(new_dataset) == [
+ {**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns)
+ ]
+
+
+def test_iterable_dataset_rename_column(dataset_with_several_columns):
+ new_dataset = dataset_with_several_columns.rename_column("id", "new_id")
+ assert list(new_dataset) == [
+ {("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns
+ ]
+
+
+def test_iterable_dataset_rename_columns(dataset_with_several_columns):
+ column_mapping = {"id": "new_id", "filepath": "filename"}
+ new_dataset = dataset_with_several_columns.rename_columns(column_mapping)
+ assert list(new_dataset) == [
+ {column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns
+ ]
+
+
+def test_iterable_dataset_remove_columns(dataset_with_several_columns):
+ new_dataset = dataset_with_several_columns.remove_columns("id")
+ assert list(new_dataset) == [
+ {k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns
+ ]
+ new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"])
+ assert list(new_dataset) == [
+ {k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns
+ ]
+
+
def test_iterable_dataset_cast_column(generate_examples_fn):
ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
features = Features({"id": Value("int64"), "label": Value("int64")})
@@ -536,6 +579,15 @@ def test_iterable_dataset_cast_column(generate_examples_fn):
assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable]
+def test_iterable_dataset_cast(generate_examples_fn):
+ ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10})
+ features = Features({"id": Value("int64"), "label": Value("int64")})
+ dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features))
+ new_features = Features({"id": Value("int64"), "label": Value("bool")})
+ casted_dataset = dataset.cast(new_features)
+ assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable]
+
+
@pytest.mark.parametrize(
"probas, seed, expected_length",
[
| diff --git a/docs/source/stream.mdx b/docs/source/stream.mdx
index 7a27ec8e03b..d08745a618b 100644
--- a/docs/source/stream.mdx
+++ b/docs/source/stream.mdx
@@ -91,12 +91,11 @@ You can split your dataset one of two ways:
```py
>>> from datasets import interleave_datasets
->>> from itertools import islice
>>> en_dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> fr_dataset = load_dataset('oscar', "unshuffled_deduplicated_fr", split='train', streaming=True)
>>> multilingual_dataset = interleave_datasets([en_dataset, fr_dataset])
->>> print(list(islice(multilingual_dataset, 2)))
+>>> list(multilingual_dataset.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': "Média de débat d'idées, de culture et de littérature..."}]
```
@@ -104,15 +103,31 @@ Define sampling probabilities from each of the original datasets for more contro
```py
>>> multilingual_dataset_with_oversampling = interleave_datasets([en_dataset, fr_dataset], probabilities=[0.8, 0.2], seed=42)
->>> print(list(islice(multilingual_dataset_with_oversampling, 2)))
+>>> list(multilingual_dataset_with_oversampling.take(2))
[{'text': 'Mtendere Village was inspired by the vision...'}, {'text': 'Lily James cannot fight the music...'}]
```
Around 80% of the final dataset is made of the `en_dataset`, and 20% of the `fr_dataset`.
-## Remove
+## Rename, remove, and cast
-Remove columns on-the-fly with [`datasets.IterableDataset.remove_columns`]. Specify the name of the column to remove:
+The following methods allow you to modify the columns of a dataset. These methods are useful for renaming or removing columns and changing columns to a new set of features.
+
+### Rename
+
+Use [`datasets.IterableDataset.rename_column`] when you need to rename a column in your dataset. Features associated with the original column are actually moved under the new column name, instead of just replacing the original column in-place.
+
+Provide [`datasets.IterableDataset.rename_column`] with the name of the original column, and the new column name:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('mc4', 'en', streaming=True, split='train')
+>>> dataset = dataset.rename_column("text", "content")
+```
+
+### Remove
+
+When you need to remove one or more columns, give [`datasets.IterableDataset.remove_columns`] the name of the column to remove. Remove more than one column by providing a list of column names:
```py
>>> from datasets import load_dataset
@@ -120,6 +135,47 @@ Remove columns on-the-fly with [`datasets.IterableDataset.remove_columns`]. Spec
>>> dataset = dataset.remove_columns('timestamp')
```
+### Cast
+
+[`datasets.IterableDataset.cast`] changes the feature type of one or more columns. This method takes your new `datasets.Features` as its argument. The following sample code shows how to change the feature types of `datasets.ClassLabel` and `datasets.Value`:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('glue', 'mrpc', split='train')features
+{'sentence1': Value(dtype='string', id=None),
+'sentence2': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None),
+'idx': Value(dtype='int32', id=None)}
+
+>>> from datasets import ClassLabel, Value
+>>> new_features = dataset.features.copy()
+>>> new_features["label"] = ClassLabel(names=['negative', 'positive'])
+>>> new_features["idx"] = Value('int64')
+>>> dataset = dataset.cast(new_features)
+>>> dataset.features
+{'sentence1': Value(dtype='string', id=None),
+'sentence2': Value(dtype='string', id=None),
+'label': ClassLabel(num_classes=2, names=['negative', 'positive'], names_file=None, id=None),
+'idx': Value(dtype='int64', id=None)}
+```
+
+<Tip>
+
+Casting only works if the original feature type and new feature type are compatible. For example, you can cast a column with the feature type `Value('int32')` to `Value('bool')` if the original column only contains ones and zeros.
+
+</Tip>
+
+Use [`datasets.Dataset.cast_column`] to change the feature type of just one column. Pass the column name and its new feature type as arguments:
+
+```py
+>>> dataset.features
+{'audio': Audio(sampling_rate=44100, mono=True, id=None)}
+
+>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
+>>> dataset.features
+{'audio': Audio(sampling_rate=16000, mono=True, id=None)}
+```
+
## Map
Similar to the [`datasets.Dataset.map`] function for a regular [`datasets.Dataset`], 🤗 Datasets features [`datasets.IterableDataset.map`] for processing [`datasets.IterableDataset`]\s.
| [
{
"components": [
{
"doc": "Add column to Dataset.\n\nArgs:\n name (str): Column name.\n column (list or np.array): Column data to be added.\n\nReturns:\n :class:`IterableDataset`",
"lines": [
610,
626
],
"name": "IterableDataset.add_column",
... | [
"tests/test_iterable_dataset.py::test_iterable_dataset_add_column",
"tests/test_iterable_dataset.py::test_iterable_dataset_rename_column",
"tests/test_iterable_dataset.py::test_iterable_dataset_rename_columns",
"tests/test_iterable_dataset.py::test_iterable_dataset_remove_columns",
"tests/test_iterable_data... | [
"tests/test_iterable_dataset.py::test_examples_iterable",
"tests/test_iterable_dataset.py::test_examples_iterable_with_kwargs",
"tests/test_iterable_dataset.py::test_examples_iterable_shuffle_data_sources",
"tests/test_iterable_dataset.py::test_examples_iterable_shuffle_shards_and_metadata",
"tests/test_ite... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Manipulate columns on IterableDataset (rename columns, cast, etc.)
I added:
- add_column
- cast
- rename_column
- rename_columns
related to https://github.com/huggingface/datasets/issues/3444
TODO:
- [x] docs
- [x] tests
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/datasets/iterable_dataset.py]
(definition of IterableDataset.add_column:)
def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
"""Add column to Dataset.
Args:
name (str): Column name.
column (list or np.array): Column data to be added.
Returns:
:class:`IterableDataset`"""
(definition of IterableDataset.add_column.add_column_fn:)
def add_column_fn(example, idx):
(definition of IterableDataset.rename_column:)
def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
"""Rename a column in the dataset, and move the features associated to the original column under the new column
name.
Args:
original_column_name (:obj:`str`): Name of the column to rename.
new_column_name (:obj:`str`): New name for the column.
Returns:
:class:`IterableDataset`: A copy of the dataset with a renamed column."""
(definition of IterableDataset.rename_column.rename_column_fn:)
def rename_column_fn(example):
(definition of IterableDataset.rename_columns:)
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
"""Rename several columns in the dataset, and move the features associated to the original columns under
the new column names.
Args:
column_mapping (:obj:`Dict[str, str]`): A mapping of columns to rename to their new names
Returns:
:class:`IterableDataset`: A copy of the dataset with renamed columns"""
(definition of IterableDataset.rename_columns.rename_columns_fn:)
def rename_columns_fn(example):
(definition of IterableDataset.cast:)
def cast( self, features: Features, ) -> "IterableDataset":
"""Cast the dataset to a new set of features.
Args:
features (:class:`datasets.Features`): New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset.
Returns:
:class:`IterableDataset`: A copy of the dataset with casted features."""
[end of new definitions in src/datasets/iterable_dataset.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5142a8cf61d8a4495eda3d91dc4283a6df01ea14 | |
huggingface__accelerate-270 | 270 | huggingface/accelerate | null | 515fcca9ed2b36c274c595dbdff75f1c2da635de | 2022-03-07T19:01:35Z | diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py
index 1254c19e9af..0fd2a5701a7 100644
--- a/src/accelerate/accelerator.py
+++ b/src/accelerate/accelerator.py
@@ -22,7 +22,7 @@
from packaging import version
-from .checkpointing import load_accelerator_state, save_accelerator_state
+from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
from .data_loader import prepare_data_loader
from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler
from .optimizer import AcceleratedOptimizer
@@ -33,6 +33,7 @@
convert_outputs_to_fp32,
extract_model_from_parallel,
gather,
+ get_pretty_name,
pad_across_processes,
save,
wait_for_everyone,
@@ -188,6 +189,7 @@ def __init__(
# Internal references to the training objects
self._optimizers = []
self._models = []
+ self._custom_objects = []
# RNG Types
self.rng_types = rng_types
@@ -564,7 +566,7 @@ def save(self, obj, f):
def save_state(self, output_dir: str):
"""
- Saves the current states of the model, optimizer, scaler, and RNG generators.
+ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects.
Args:
output_dir (:obj:`str` or :obj:`os.PathLike`):
@@ -575,11 +577,16 @@ def save_state(self, output_dir: str):
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving current state to {output_dir}")
weights = [self.get_state_dict(m) for m in self._models]
- return save_accelerator_state(output_dir, weights, self._optimizers, self.state.process_index, self.scaler)
+ save_location = save_accelerator_state(
+ output_dir, weights, self._optimizers, self.state.process_index, self.scaler
+ )
+ for i, obj in enumerate(self._custom_objects):
+ save_custom_state(obj, output_dir, i)
+ return save_location
def load_state(self, input_dir: str):
"""
- Loads the current states of the model, optimizer, scaler, and RNG generators.
+ Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
Args:
input_dir (:obj:`str` or :obj:`os.PathLike`):
@@ -591,6 +598,16 @@ def load_state(self, input_dir: str):
raise ValueError(f"Tried to find {input_dir} but folder does not exist")
logger.info(f"Loading states from {input_dir}")
load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)
+ custom_checkpoints = [f for f in os.listdir(input_dir) if "custom_checkpoint" in f]
+ if len(custom_checkpoints) != len(self._custom_objects):
+ err = "Warning! Number of found checkpoints does not match the number of registered objects:"
+ err += f"\n\tFound checkpoints: {len(custom_checkpoints)}"
+ err += f"\n\tRegistered objects: {len(self._custom_objects)}\nSkipping."
+ logger.warn(err)
+ else:
+ logger.info(f"Loading in {len(custom_checkpoints)} custom states")
+ for index, obj in enumerate(self._custom_objects):
+ load_custom_state(obj, input_dir, index)
def free_memory(self):
"""
@@ -646,6 +663,26 @@ def get_state_dict(self, model):
return state_dict
+ def register_for_checkpointing(self, *objects):
+ """
+ Makes note of `objects` and will save or load them in during `save_state` or `load_state`.
+
+ These should be utilized when the state is being loaded or saved in the same script. It is not designed to be
+ used in different scripts
+
+ Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored.
+ """
+ invalid_objects = []
+ for obj in objects:
+ if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"):
+ invalid_objects.append(obj)
+ if len(invalid_objects) > 0:
+ err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:"
+ for index, obj in enumerate(invalid_objects):
+ err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`"
+ raise ValueError(err)
+ self._custom_objects.extend(objects)
+
@contextmanager
def autocast(self):
"""
diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py
index 62b1291ee58..4b20cc1c3c1 100644
--- a/src/accelerate/utils.py
+++ b/src/accelerate/utils.py
@@ -691,3 +691,16 @@ def patch_environment(**kwargs):
for key in kwargs:
del os.environ[key.upper()]
+
+
+def get_pretty_name(obj):
+ """
+ Gets a pretty name from ``obj``
+ """
+ if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
+ obj = getattr(obj, "__class__", obj)
+ if hasattr(obj, "__qualname__"):
+ return obj.__qualname__
+ if hasattr(obj, "__name__"):
+ return obj.__name__
+ return str(obj)
| diff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py
index 37d68b90c25..e1a28d108fe 100644
--- a/src/accelerate/checkpointing.py
+++ b/src/accelerate/checkpointing.py
@@ -14,6 +14,7 @@
import os
import random
+from pathlib import Path
from typing import List
import numpy as np
@@ -21,7 +22,7 @@
from torch.cuda.amp import GradScaler
from .state import is_tpu_available
-from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save
+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, get_pretty_name, save
if is_tpu_available():
@@ -132,3 +133,22 @@ def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=
if is_tpu_available():
xm.set_rng_state(states["xm_seed"])
logger.info("All random states loaded successfully")
+
+
+def save_custom_state(obj, path, index: int = 0):
+ """
+ Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
+ """
+ # Should this be the right way to get a qual_name type value from `obj`?
+ save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
+ logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
+ torch.save(obj.state_dict(), save_location)
+
+
+def load_custom_state(obj, path, index: int = 0):
+ """
+ Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
+ """
+ load_location = f"{path}/custom_checkpoint_{index}.pkl"
+ logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}")
+ obj.load_state_dict(torch.load(load_location))
diff --git a/tests/test_state_checkpointing.py b/tests/test_state_checkpointing.py
index a74dcb7247b..87b2d3b0674 100644
--- a/tests/test_state_checkpointing.py
+++ b/tests/test_state_checkpointing.py
@@ -43,13 +43,13 @@ def get_dataset(n_batches):
return (train_dataloader, valid_dataloader)
-def train(num_epochs, model, dataloader, optimizer, accelerator):
+def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
"Trains for `num_epochs`"
rands = []
for epoch in range(num_epochs):
# Train quickly
model.train()
- for step, batch in enumerate(dataloader):
+ for batch in dataloader:
x, y = batch
outputs = model(x)
loss = torch.nn.functional.mse_loss(outputs, y)
@@ -57,6 +57,8 @@ def train(num_epochs, model, dataloader, optimizer, accelerator):
optimizer.step()
optimizer.zero_grad()
rands.append(random.random()) # Introduce some randomness
+ if scheduler is not None:
+ scheduler.step()
return rands
@@ -123,3 +125,41 @@ def test_can_resume_training(self):
self.assertEqual(b1, b3)
self.assertEqual(opt_state1, opt_state3)
self.assertEqual(ground_truth_rands, test_rands)
+
+ def test_invalid_registration(self):
+ t = torch.tensor([1, 2, 3])
+ t1 = torch.tensor([2, 3, 4])
+ net = DummyModel()
+ opt = torch.optim.Adam(net.parameters())
+ accelerator = Accelerator()
+ with self.assertRaises(ValueError) as ve:
+ accelerator.register_for_checkpointing(t, t1, net, opt)
+ message = str(ve.exception)
+ self.assertTrue("Item at index 0" in message)
+ self.assertTrue("Item at index 1" in message)
+ self.assertFalse("Item at index 2" in message)
+ self.assertFalse("Item at index 3" in message)
+
+ def test_with_scheduler(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ set_seed(42)
+ model = DummyModel()
+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
+ scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
+ train_dataloader, valid_dataloader = dummy_dataloaders()
+ # Train baseline
+ accelerator = Accelerator()
+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
+ model, optimizer, train_dataloader, valid_dataloader
+ )
+ accelerator.register_for_checkpointing(scheduler)
+ # Save initial
+ initial = os.path.join(tmpdir, "initial")
+ accelerator.save_state(initial)
+ scheduler_state = scheduler.state_dict()
+ train(3, model, train_dataloader, optimizer, accelerator, scheduler)
+ self.assertNotEqual(scheduler_state, scheduler.state_dict())
+
+ # Load everything back in and make sure all states work
+ accelerator.load_state(initial)
+ self.assertEqual(scheduler_state, scheduler.state_dict())
| [
{
"components": [
{
"doc": "Makes note of `objects` and will save or load them in during `save_state` or `load_state`.\n\nThese should be utilized when the state is being loaded or saved in the same script. It is not designed to be\nused in different scripts\n\nNote: Every `object` must have a `lo... | [
"tests/test_state_checkpointing.py::CheckpointTest::test_can_resume_training",
"tests/test_state_checkpointing.py::CheckpointTest::test_invalid_registration",
"tests/test_state_checkpointing.py::CheckpointTest::test_with_scheduler"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Implementation of saving and loading custom states
Introduces a `register_for_checkpointing` function, as well as two save/loading functions for those registered objects.
Would like an initial review for if I'm on the right track 😄
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/accelerate/accelerator.py]
(definition of Accelerator.register_for_checkpointing:)
def register_for_checkpointing(self, *objects):
"""Makes note of `objects` and will save or load them in during `save_state` or `load_state`.
These should be utilized when the state is being loaded or saved in the same script. It is not designed to be
used in different scripts
Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored."""
[end of new definitions in src/accelerate/accelerator.py]
[start of new definitions in src/accelerate/utils.py]
(definition of get_pretty_name:)
def get_pretty_name(obj):
"""Gets a pretty name from ``obj``"""
[end of new definitions in src/accelerate/utils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 08101b9dde2b1a9658c2e363e3e9f5663ba06073 | ||
pypa__hatch-150 | 150 | pypa/hatch | null | aecadf56d2d5054f4686ba0c1c8908f2baa998e2 | 2022-03-05T22:16:19Z | diff --git a/docs/config/environment.md b/docs/config/environment.md
index 860c2d0f7..060c14968 100644
--- a/docs/config/environment.md
+++ b/docs/config/environment.md
@@ -390,6 +390,28 @@ The following platforms are supported:
If unspecified, the environment is assumed to be compatible with all platforms.
+## Description
+
+The `description` option is purely informational and is displayed in the output of the [`env show`](../cli/reference.md#hatch-env-show) command:
+
+=== ":octicons-file-code-16: pyproject.toml"
+
+ ```toml
+ [tool.hatch.envs.<ENV_NAME>]
+ description = """
+ Lorem ipsum ...
+ """
+ ```
+
+=== ":octicons-file-code-16: hatch.toml"
+
+ ```toml
+ [envs.<ENV_NAME>]
+ description = """
+ Lorem ipsum ...
+ """
+ ```
+
## Type
An environment's `type` determines which [environment plugin](../plugins/environment.md) will be used for management. The only built-in environment type is [virtual](../plugins/environment.md#virtual), which uses virtual Python environments.
diff --git a/docs/plugins/environment.md b/docs/plugins/environment.md
index 617fc13c4..0be31e77a 100644
--- a/docs/plugins/environment.md
+++ b/docs/plugins/environment.md
@@ -66,6 +66,7 @@ The environment plugin name is `virtual`.
- platforms
- skip_install
- dev_mode
+ - description
- activate
- deactivate
- find
diff --git a/src/hatch/cli/env/__init__.py b/src/hatch/cli/env/__init__.py
index 789b0deb3..6513f1b82 100644
--- a/src/hatch/cli/env/__init__.py
+++ b/src/hatch/cli/env/__init__.py
@@ -50,7 +50,17 @@ def set_available_columns(columns):
sorted('='.join(item) for item in config['env-vars'].items())
)
- matrix_columns = {'Name': {}, 'Type': {}, 'Envs': {}, 'Dependencies': {}, 'Environment variables': {}}
+ if config.get('description'):
+ columns['Description'][i] = config['description'].strip()
+
+ matrix_columns = {
+ 'Name': {},
+ 'Type': {},
+ 'Envs': {},
+ 'Dependencies': {},
+ 'Environment variables': {},
+ 'Description': {},
+ }
matrix_envs = set()
for i, (matrix_name, matrix_data) in enumerate(project_config.matrices.items()):
for env_name in matrix_data['envs']:
@@ -62,7 +72,7 @@ def set_available_columns(columns):
matrix_columns['Envs'][i] = '\n'.join(matrix_data['envs'])
set_available_columns(matrix_columns)
- standalone_columns = {'Name': {}, 'Type': {}, 'Dependencies': {}, 'Environment variables': {}}
+ standalone_columns = {'Name': {}, 'Type': {}, 'Dependencies': {}, 'Environment variables': {}, 'Description': {}}
standalone_envs = (
(env_name, config) for env_name, config in project_config.envs.items() if env_name not in matrix_envs
)
@@ -73,7 +83,8 @@ def set_available_columns(columns):
column_options = {}
for title in matrix_columns:
- column_options[title] = {'no_wrap': True}
+ if title != 'Description':
+ column_options[title] = {'no_wrap': True}
app.display_table(
'Standalone', standalone_columns, show_lines=True, column_options=column_options, force_ascii=force_ascii
diff --git a/src/hatch/cli/terminal.py b/src/hatch/cli/terminal.py
index 53e61fd04..c8063d834 100644
--- a/src/hatch/cli/terminal.py
+++ b/src/hatch/cli/terminal.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import os
from contextlib import contextmanager
from textwrap import indent as indent_text
@@ -15,7 +16,13 @@ def __init__(self, verbosity, enable_color, interactive):
self.verbosity = verbosity
self.interactive = interactive
self.console = Console(
- force_terminal=enable_color, no_color=enable_color is False, markup=False, emoji=False, highlight=False
+ force_terminal=enable_color,
+ no_color=enable_color is False,
+ markup=False,
+ emoji=False,
+ highlight=False,
+ # Force consistent output for test assertions
+ legacy_windows=False if 'HATCH_SELF_TESTING' in os.environ else None,
)
# Set defaults so we can pretty print before loading user config
diff --git a/src/hatch/env/plugin/interface.py b/src/hatch/env/plugin/interface.py
index 1699c48e6..274a85049 100644
--- a/src/hatch/env/plugin/interface.py
+++ b/src/hatch/env/plugin/interface.py
@@ -63,6 +63,7 @@ def __init__(self, root, metadata, name, config, data_directory, platform, verbo
self._skip_install = None
self._dev_mode = None
self._features = None
+ self._description = None
self._scripts = None
self._pre_install_commands = None
self._post_install_commands = None
@@ -398,6 +399,32 @@ def features(self):
return self._features
+ @property
+ def description(self) -> str:
+ """
+ === ":octicons-file-code-16: pyproject.toml"
+
+ ```toml
+ [tool.hatch.envs.<ENV_NAME>]
+ description = ...
+ ```
+
+ === ":octicons-file-code-16: hatch.toml"
+
+ ```toml
+ [envs.<ENV_NAME>]
+ description = ...
+ ```
+ """
+ if self._description is None:
+ description = self.config.get('description', '')
+ if not isinstance(description, str):
+ raise TypeError(f'Field `tool.hatch.envs.{self.name}.description` must be a string')
+
+ self._description = description
+
+ return self._description
+
@property
def scripts(self):
if self._scripts is None:
| diff --git a/tests/cli/env/test_show.py b/tests/cli/env/test_show.py
index 9a84dafcb..c576e38ca 100644
--- a/tests/cli/env/test_show.py
+++ b/tests/cli/env/test_show.py
@@ -5,7 +5,7 @@
@pytest.fixture(scope='module', autouse=True)
-def local_builder():
+def terminal_width():
with EnvVars({'COLUMNS': '200'}):
yield
@@ -231,17 +231,26 @@ def test_optional_columns(hatch, helpers, temp_dir, config_file):
dependencies = ['python___dateutil', 'bAr.Baz[TLS] >=1.2RC5', 'Foo;python_version<"3.8"']
env_vars = {'FOO': '1', 'BAR': '2'}
+ description = """
+Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna \
+aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. \
+Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint \
+occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+"""
project = Project(project_path)
helpers.update_project_environment(
project,
'default',
{
'matrix': [{'version': ['9000', '3.14'], 'py': ['39', '310']}],
+ 'description': description,
'dependencies': dependencies,
'env-vars': env_vars,
},
)
- helpers.update_project_environment(project, 'foo', {'dependencies': dependencies, 'env-vars': env_vars})
+ helpers.update_project_environment(
+ project, 'foo', {'description': description, 'dependencies': dependencies, 'env-vars': env_vars}
+ )
with project_path.as_cwd():
result = hatch('env', 'show', '--ascii')
@@ -249,22 +258,24 @@ def test_optional_columns(hatch, helpers, temp_dir, config_file):
assert result.exit_code == 0, result.output
assert helpers.remove_trailing_spaces(result.output) == helpers.dedent(
"""
- Standalone
- +------+---------+-----------------------------+-----------------------+
- | Name | Type | Dependencies | Environment variables |
- +======+=========+=============================+=======================+
- | foo | virtual | bar-baz[tls]>=1.2rc5 | BAR=2 |
- | | | foo; python_version < '3.8' | FOO=1 |
- | | | python-dateutil | |
- +------+---------+-----------------------------+-----------------------+
- Matrices
- +---------+---------+------------+-----------------------------+-----------------------+
- | Name | Type | Envs | Dependencies | Environment variables |
- +=========+=========+============+=============================+=======================+
- | default | virtual | py39-9000 | bar-baz[tls]>=1.2rc5 | BAR=2 |
- | | | py39-3.14 | foo; python_version < '3.8' | FOO=1 |
- | | | py310-9000 | python-dateutil | |
- | | | py310-3.14 | | |
- +---------+---------+------------+-----------------------------+-----------------------+
- """
+ Standalone
+ +------+---------+-----------------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------+
+ | Name | Type | Dependencies | Environment variables | Description |
+ +======+=========+=============================+=======================+===============================================================================================================================+
+ | foo | virtual | bar-baz[tls]>=1.2rc5 | BAR=2 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. |
+ | | | foo; python_version < '3.8' | FOO=1 | Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure |
+ | | | python-dateutil | | dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non |
+ | | | | | proident, sunt in culpa qui officia deserunt mollit anim id est laborum. |
+ +------+---------+-----------------------------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------+
+ Matrices
+ +---------+---------+------------+-----------------------------+-----------------------+---------------------------------------------------------------------------------------------------------------+
+ | Name | Type | Envs | Dependencies | Environment variables | Description |
+ +=========+=========+============+=============================+=======================+===============================================================================================================+
+ | default | virtual | py39-9000 | bar-baz[tls]>=1.2rc5 | BAR=2 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore |
+ | | | py39-3.14 | foo; python_version < '3.8' | FOO=1 | magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea |
+ | | | py310-9000 | python-dateutil | | commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat |
+ | | | py310-3.14 | | | nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit |
+ | | | | | | anim id est laborum. |
+ +---------+---------+------------+-----------------------------+-----------------------+---------------------------------------------------------------------------------------------------------------+
+ """ # noqa: E501
)
diff --git a/tests/cli/run/test_run.py b/tests/cli/run/test_run.py
index ca83d584a..e40474fab 100644
--- a/tests/cli/run/test_run.py
+++ b/tests/cli/run/test_run.py
@@ -391,7 +391,7 @@ def test_error(hatch, helpers, temp_dir, config_file):
assert not output_file.is_file()
-def test_matrix(hatch, helpers, temp_dir, config_file, legacy_windows_terminal):
+def test_matrix(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
@@ -415,13 +415,12 @@ def test_matrix(hatch, helpers, temp_dir, config_file, legacy_windows_terminal):
'run', 'test:python', '-c', "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])"
)
- padding = '' if legacy_windows_terminal else '─'
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
- f"""
- {padding}───────────────────────────────── test.9000 ──────────────────────────────────{padding}
+ """
+ ────────────────────────────────── test.9000 ───────────────────────────────────
Creating environment: test.9000
- {padding}────────────────────────────────── test.42 ───────────────────────────────────{padding}
+ ─────────────────────────────────── test.42 ────────────────────────────────────
Creating environment: test.42
"""
)
@@ -531,7 +530,7 @@ def test_incompatible_matrix_full(hatch, helpers, temp_dir, config_file):
assert not env_data_path.is_dir()
-def test_incompatible_matrix_partial(hatch, helpers, temp_dir, config_file, legacy_windows_terminal):
+def test_incompatible_matrix_partial(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
@@ -562,11 +561,10 @@ def test_incompatible_matrix_partial(hatch, helpers, temp_dir, config_file, lega
'run', 'test:python', '-c', "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])"
)
- padding = '' if legacy_windows_terminal else '─'
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
- f"""
- {padding}────────────────────────────────── test.42 ───────────────────────────────────{padding}
+ """
+ ─────────────────────────────────── test.42 ────────────────────────────────────
Creating environment: test.42
Skipped 1 incompatible environment:
@@ -600,7 +598,7 @@ def test_incompatible_matrix_partial(hatch, helpers, temp_dir, config_file, lega
assert str(env_path) in python_path
-def test_incompatible_missing_python(hatch, helpers, temp_dir, config_file, legacy_windows_terminal):
+def test_incompatible_missing_python(hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins['default']['tests'] = False
config_file.save()
@@ -625,13 +623,14 @@ def test_incompatible_missing_python(hatch, helpers, temp_dir, config_file, lega
'run', 'test:python', '-c', "import os,sys;open('test.txt', 'a').write(sys.executable+os.linesep[-1])"
)
- padding = '' if legacy_windows_terminal else '─'
- left_pad = padding
- right_pad = padding + ('─' if len(known_version) < 3 else '')
+ padding = '─'
+ if len(known_version) < 3:
+ padding += '─'
+
assert result.exit_code == 0, result.output
assert result.output == helpers.dedent(
f"""
- {left_pad}───────────────────────────────── test.py{known_version} ─────────────────────────────────{right_pad}
+ ────────────────────────────────── test.py{known_version} ─────────────────────────────────{padding}
Creating environment: test.py{known_version}
Skipped 1 incompatible environment:
diff --git a/tests/conftest.py b/tests/conftest.py
index 32005dbad..f863c650c 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -70,6 +70,7 @@ def isolation() -> Generator[Path, None, None]:
ConfigEnvVars.DATA: str(data_dir),
ConfigEnvVars.CACHE: str(cache_dir),
PublishEnvVars.REPO: 'test',
+ 'HATCH_SELF_TESTING': 'true',
'GIT_AUTHOR_NAME': 'Foo Bar',
'GIT_AUTHOR_EMAIL': 'foo@bar.baz',
'COLUMNS': '80',
@@ -110,13 +111,6 @@ def current_platform():
return PLATFORM.name
-@pytest.fixture(scope='session')
-def legacy_windows_terminal():
- from rich.console import detect_legacy_windows
-
- return detect_legacy_windows()
-
-
@pytest.fixture
def temp_dir() -> Generator[Path, None, None]:
with temp_directory() as d:
diff --git a/tests/env/plugin/test_interface.py b/tests/env/plugin/test_interface.py
index 6336c2b0b..9a3b978e5 100644
--- a/tests/env/plugin/test_interface.py
+++ b/tests/env/plugin/test_interface.py
@@ -307,7 +307,7 @@ def test_disable(self, isolation, data_dir, platform):
assert environment.dev_mode is False
-class TestFeature:
+class TestFeatures:
def test_default(self, isolation, data_dir, platform):
config = {'project': {'name': 'my_app', 'version': '0.0.1'}}
project = Project(isolation, config=config)
@@ -390,6 +390,43 @@ def test_feature_undefined(self, isolation, data_dir, platform):
_ = environment.features
+class TestDescription:
+ def test_default(self, isolation, data_dir, platform):
+ config = {'project': {'name': 'my_app', 'version': '0.0.1'}}
+ project = Project(isolation, config=config)
+ environment = MockEnvironment(
+ isolation, project.metadata, 'default', project.config.envs['default'], data_dir, platform, 0
+ )
+
+ assert environment.description == environment.description == ''
+
+ def test_not_string(self, isolation, data_dir, platform):
+ config = {
+ 'project': {'name': 'my_app', 'version': '0.0.1'},
+ 'tool': {'hatch': {'envs': {'default': {'description': 9000}}}},
+ }
+ project = Project(isolation, config=config)
+ environment = MockEnvironment(
+ isolation, project.metadata, 'default', project.config.envs['default'], data_dir, platform, 0
+ )
+
+ with pytest.raises(TypeError, match='Field `tool.hatch.envs.default.description` must be a string'):
+ _ = environment.description
+
+ def test_correct(self, isolation, data_dir, platform):
+ description = 'foo'
+ config = {
+ 'project': {'name': 'my_app', 'version': '0.0.1'},
+ 'tool': {'hatch': {'envs': {'default': {'description': description}}}},
+ }
+ project = Project(isolation, config=config)
+ environment = MockEnvironment(
+ isolation, project.metadata, 'default', project.config.envs['default'], data_dir, platform, 0
+ )
+
+ assert environment.description is description
+
+
class TestDependencies:
def test_default(self, isolation, data_dir, platform):
config = {'project': {'name': 'my_app', 'version': '0.0.1', 'dependencies': ['dep1']}}
| diff --git a/docs/config/environment.md b/docs/config/environment.md
index 860c2d0f7..060c14968 100644
--- a/docs/config/environment.md
+++ b/docs/config/environment.md
@@ -390,6 +390,28 @@ The following platforms are supported:
If unspecified, the environment is assumed to be compatible with all platforms.
+## Description
+
+The `description` option is purely informational and is displayed in the output of the [`env show`](../cli/reference.md#hatch-env-show) command:
+
+=== ":octicons-file-code-16: pyproject.toml"
+
+ ```toml
+ [tool.hatch.envs.<ENV_NAME>]
+ description = """
+ Lorem ipsum ...
+ """
+ ```
+
+=== ":octicons-file-code-16: hatch.toml"
+
+ ```toml
+ [envs.<ENV_NAME>]
+ description = """
+ Lorem ipsum ...
+ """
+ ```
+
## Type
An environment's `type` determines which [environment plugin](../plugins/environment.md) will be used for management. The only built-in environment type is [virtual](../plugins/environment.md#virtual), which uses virtual Python environments.
diff --git a/docs/plugins/environment.md b/docs/plugins/environment.md
index 617fc13c4..0be31e77a 100644
--- a/docs/plugins/environment.md
+++ b/docs/plugins/environment.md
@@ -66,6 +66,7 @@ The environment plugin name is `virtual`.
- platforms
- skip_install
- dev_mode
+ - description
- activate
- deactivate
- find
| [
{
"components": [
{
"doc": "=== \":octicons-file-code-16: pyproject.toml\"\n\n ```toml\n [tool.hatch.envs.<ENV_NAME>]\n description = ...\n ```\n\n=== \":octicons-file-code-16: hatch.toml\"\n\n ```toml\n [envs.<ENV_NAME>]\n description = ...\n ```",
"lines": [
... | [
"tests/cli/env/test_show.py::test_optional_columns",
"tests/env/plugin/test_interface.py::TestDescription::test_default",
"tests/env/plugin/test_interface.py::TestDescription::test_not_string",
"tests/env/plugin/test_interface.py::TestDescription::test_correct"
] | [
"tests/cli/env/test_show.py::test_default",
"tests/cli/env/test_show.py::test_single_only",
"tests/cli/env/test_show.py::test_single_and_matrix",
"tests/cli/env/test_show.py::test_default_matrix_only",
"tests/cli/env/test_show.py::test_all_matrix_types_with_single",
"tests/cli/run/test_run.py::test_automa... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `description` option to environments
{}
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/hatch/env/plugin/interface.py]
(definition of EnvironmentInterface.description:)
def description(self) -> str:
"""=== ":octicons-file-code-16: pyproject.toml"
```toml
[tool.hatch.envs.<ENV_NAME>]
description = ...
```
=== ":octicons-file-code-16: hatch.toml"
```toml
[envs.<ENV_NAME>]
description = ...
```"""
[end of new definitions in src/hatch/env/plugin/interface.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | c06c820d722667306f39bda956c58cf4c48d0728 | |
huggingface__datasets-3826 | 3,826 | huggingface/datasets | null | 2a5149bef20400d8c1fb57d4fe723b7ef6785359 | 2022-03-04T16:57:23Z | diff --git a/docs/source/package_reference/main_classes.mdx b/docs/source/package_reference/main_classes.mdx
index 36a76a5b57d..8915dec13ef 100644
--- a/docs/source/package_reference/main_classes.mdx
+++ b/docs/source/package_reference/main_classes.mdx
@@ -152,6 +152,7 @@ The base class [`datasets.IterableDataset`] implements an iterable Dataset backe
- cast_column
- __iter__
- map
+ - filter
- shuffle
- skip
- take
diff --git a/docs/source/process.mdx b/docs/source/process.mdx
index b6e228b6b17..5eb535f6bd3 100644
--- a/docs/source/process.mdx
+++ b/docs/source/process.mdx
@@ -83,7 +83,7 @@ There are two options for filtering rows in a dataset: [`datasets.Dataset.select
[`datasets.Dataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
->>> even_dataset = dataset.filter(lambda example, indice: indice % 2 == 0, with_indices=True)
+>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> len(even_dataset)
1834
>>> len(dataset) / 2
diff --git a/docs/source/stream.mdx b/docs/source/stream.mdx
index 7a27ec8e03b..21240157a0b 100644
--- a/docs/source/stream.mdx
+++ b/docs/source/stream.mdx
@@ -184,6 +184,28 @@ See other examples of batch processing in [the batched map processing documentat
</Tip>
+### Filter
+
+You can filter rows in the dataset based on a predicate function using [`datasets.Dataset.filter`]. It returns rows that match a specified condition:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train')
+>>> start_with_ar = dataset.filter(lambda example: example['text'].startswith('Ar'))
+>>> next(iter(start_with_ar))
+{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)?...'}
+```
+
+[`datasets.Dataset.filter`] can also filter by indices if you set `with_indices=True`:
+
+```py
+>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
+>>> list(even_dataset.take(3))
+[{'id': 0, 'text': 'Mtendere Village was inspired by the vision of Chief Napoleon Dzombe, ...'},
+ {'id': 2, 'text': '"I\'d love to help kickstart continued development! And 0 EUR/month...'},
+ {'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)? Normally, ...'}]
+```
+
## Stream in a training loop
[`datasets.IterableDataset`] can be integrated into a training loop. First, shuffle the dataset:
diff --git a/src/datasets/iterable_dataset.py b/src/datasets/iterable_dataset.py
index b255f9d7c79..1b75f17a623 100644
--- a/src/datasets/iterable_dataset.py
+++ b/src/datasets/iterable_dataset.py
@@ -264,6 +264,73 @@ def n_shards(self) -> int:
return self.ex_iterable.n_shards
+class FilteredExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: int = 1000,
+ ):
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+
+ def __iter__(self):
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+ if self.batched:
+ for key, example in iterator:
+ # If batched, first build the batch
+ key_examples_list = [(key, example)] + [
+ (key, example) for key, example in islice(iterator, self.batch_size - 1)
+ ]
+ keys, examples = zip(*key_examples_list)
+ batch = _examples_to_batch(examples)
+ # then compute the mask for the batch
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ mask = self.function(*function_args)
+ # yield one example at a time from the batch
+ for batch_idx, (key_example, to_keep) in enumerate(zip(key_examples_list, mask)):
+ if to_keep:
+ yield key_example
+ current_idx += batch_idx + 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the filtering function direcly
+ inputs = dict(example)
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ to_keep = self.function(*function_args)
+ if to_keep:
+ yield key, example
+ current_idx += 1
+
+ def shuffle_data_sources(self, seed: Optional[int]) -> "MappedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(seed),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
class BufferShuffledExamplesIterable(_BaseExamplesIterable):
def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
self.ex_iterable = ex_iterable
@@ -469,7 +536,7 @@ def map(
batched: bool = False,
batch_size: int = 1000,
remove_columns: Optional[Union[str, List[str]]] = None,
- ):
+ ) -> "IterableDataset":
"""
Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
If your function returns a column that already exists, then it overwrites it.
@@ -522,6 +589,58 @@ def map(
shuffling=copy.deepcopy(self._shuffling),
)
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ ) -> "IterableDataset":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+
+ Args:
+ function (:obj:`Callable`): Callable with one of the following signatures:
+
+ - ``function(example: Union[Dict, Any]) -> bool`` if ``with_indices=False, batched=False``
+ - ``function(example: Union[Dict, Any], indices: int) -> bool`` if ``with_indices=True, batched=False``
+ - ``function(example: Union[Dict, Any]) -> List[bool]`` if ``with_indices=False, batched=True``
+ - ``function(example: Union[Dict, Any], indices: int) -> List[bool]`` if ``with_indices=True, batched=True``
+
+ If no function is provided, defaults to an always True function: ``lambda x: True``.
+ with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (:obj:`str` or `List[str]`, optional): The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function`
+ batch_size (:obj:`int`, optional, default ``1000``): Number of examples per batch provided to `function` if `batched=True`.
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
+ info = copy.deepcopy(self._info)
+ info.features = None
+
+ # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
+ ex_iterable = FilteredExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ )
+ return iterable_dataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ format_type=self._format_type,
+ shuffling=copy.deepcopy(self._shuffling),
+ )
+
def shuffle(
self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
) -> "IterableDataset":
| diff --git a/tests/test_iterable_dataset.py b/tests/test_iterable_dataset.py
index 3d619dbe9c4..c2245e68561 100644
--- a/tests/test_iterable_dataset.py
+++ b/tests/test_iterable_dataset.py
@@ -11,6 +11,7 @@
BufferShuffledExamplesIterable,
CyclingMultiSourcesExamplesIterable,
ExamplesIterable,
+ FilteredExamplesIterable,
IterableDataset,
MappedExamplesIterable,
RandomlyCyclingMultiSourcesExamplesIterable,
@@ -301,6 +302,94 @@ def test_mapped_examples_iterable_input_columns(generate_examples_fn, n, func, b
assert list(x for _, x in ex_iterable) == expected
+@pytest.mark.parametrize(
+ "n, func, batch_size",
+ [
+ (3, lambda x: x["id"] % 2 == 0, None), # keep even number
+ (3, lambda x: [x["id"][0] % 2 == 0], 1), # same with bs=1
+ (5, lambda x: [i % 2 == 0 for i in x["id"]], 10), # same with bs=10
+ (25, lambda x: [i % 2 == 0 for i in x["id"]], 10), # same with bs=10
+ (3, lambda x: False, None), # return 0 examples
+ (3, lambda x: [False] * len(x["id"]), 10), # same with bs=10
+ ],
+)
+def test_filtered_examples_iterable(generate_examples_fn, n, func, batch_size):
+ base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
+ ex_iterable = FilteredExamplesIterable(
+ base_ex_iterable, func, batched=batch_size is not None, batch_size=batch_size
+ )
+ all_examples = [x for _, x in generate_examples_fn(n=n)]
+ if batch_size is None:
+ expected = [x for x in all_examples if func(x)]
+ else:
+ # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
+ expected = []
+ for batch_offset in range(0, len(all_examples), batch_size):
+ examples = all_examples[batch_offset : batch_offset + batch_size]
+ batch = _examples_to_batch(examples)
+ mask = func(batch)
+ expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
+ if expected:
+ assert next(iter(ex_iterable))[1] == expected[0]
+ assert list(x for _, x in ex_iterable) == expected
+
+
+@pytest.mark.parametrize(
+ "n, func, batch_size",
+ [
+ (3, lambda x, index: index % 2 == 0, None), # keep even number
+ (25, lambda x, indices: [idx % 2 == 0 for idx in indices], 10), # same with bs=10
+ ],
+)
+def test_filtered_examples_iterable_with_indices(generate_examples_fn, n, func, batch_size):
+ base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
+ ex_iterable = FilteredExamplesIterable(
+ base_ex_iterable, func, batched=batch_size is not None, batch_size=batch_size, with_indices=True
+ )
+ all_examples = [x for _, x in generate_examples_fn(n=n)]
+ if batch_size is None:
+ expected = [x for idx, x in enumerate(all_examples) if func(x, idx)]
+ else:
+ # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
+ expected = []
+ for batch_offset in range(0, len(all_examples), batch_size):
+ examples = all_examples[batch_offset : batch_offset + batch_size]
+ batch = _examples_to_batch(examples)
+ indices = list(range(batch_offset, batch_offset + len(examples)))
+ mask = func(batch, indices)
+ expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
+ assert next(iter(ex_iterable))[1] == expected[0]
+ assert list(x for _, x in ex_iterable) == expected
+
+
+@pytest.mark.parametrize(
+ "n, func, batch_size, input_columns",
+ [
+ (3, lambda id_: id_ % 2 == 0, None, ["id"]), # keep even number
+ (25, lambda ids_: [i % 2 == 0 for i in ids_], 10, ["id"]), # same with bs=10
+ ],
+)
+def test_filtered_examples_iterable_input_columns(generate_examples_fn, n, func, batch_size, input_columns):
+ base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n})
+ ex_iterable = FilteredExamplesIterable(
+ base_ex_iterable, func, batched=batch_size is not None, batch_size=batch_size, input_columns=input_columns
+ )
+ all_examples = [x for _, x in generate_examples_fn(n=n)]
+ columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns]
+ if batch_size is None:
+ expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])]
+ else:
+ # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function
+ expected = []
+ for batch_offset in range(0, len(all_examples), batch_size):
+ examples = all_examples[batch_offset : batch_offset + batch_size]
+ batch = _examples_to_batch(examples)
+ mask = func(*[batch[col] for col in columns_to_input])
+ expected.extend([x for x, to_keep in zip(examples, mask) if to_keep])
+ assert next(iter(ex_iterable))[1] == expected[0]
+ assert list(x for _, x in ex_iterable) == expected
+
+
def test_skip_examples_iterable(generate_examples_fn):
total, count = 10, 2
base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total})
| diff --git a/docs/source/package_reference/main_classes.mdx b/docs/source/package_reference/main_classes.mdx
index 36a76a5b57d..8915dec13ef 100644
--- a/docs/source/package_reference/main_classes.mdx
+++ b/docs/source/package_reference/main_classes.mdx
@@ -152,6 +152,7 @@ The base class [`datasets.IterableDataset`] implements an iterable Dataset backe
- cast_column
- __iter__
- map
+ - filter
- shuffle
- skip
- take
diff --git a/docs/source/process.mdx b/docs/source/process.mdx
index b6e228b6b17..5eb535f6bd3 100644
--- a/docs/source/process.mdx
+++ b/docs/source/process.mdx
@@ -83,7 +83,7 @@ There are two options for filtering rows in a dataset: [`datasets.Dataset.select
[`datasets.Dataset.filter`] can also filter by indices if you set `with_indices=True`:
```py
->>> even_dataset = dataset.filter(lambda example, indice: indice % 2 == 0, with_indices=True)
+>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
>>> len(even_dataset)
1834
>>> len(dataset) / 2
diff --git a/docs/source/stream.mdx b/docs/source/stream.mdx
index 7a27ec8e03b..21240157a0b 100644
--- a/docs/source/stream.mdx
+++ b/docs/source/stream.mdx
@@ -184,6 +184,28 @@ See other examples of batch processing in [the batched map processing documentat
</Tip>
+### Filter
+
+You can filter rows in the dataset based on a predicate function using [`datasets.Dataset.filter`]. It returns rows that match a specified condition:
+
+```py
+>>> from datasets import load_dataset
+>>> dataset = load_dataset('oscar', 'unshuffled_deduplicated_en', streaming=True, split='train')
+>>> start_with_ar = dataset.filter(lambda example: example['text'].startswith('Ar'))
+>>> next(iter(start_with_ar))
+{'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)?...'}
+```
+
+[`datasets.Dataset.filter`] can also filter by indices if you set `with_indices=True`:
+
+```py
+>>> even_dataset = dataset.filter(lambda example, idx: idx % 2 == 0, with_indices=True)
+>>> list(even_dataset.take(3))
+[{'id': 0, 'text': 'Mtendere Village was inspired by the vision of Chief Napoleon Dzombe, ...'},
+ {'id': 2, 'text': '"I\'d love to help kickstart continued development! And 0 EUR/month...'},
+ {'id': 4, 'text': 'Are you looking for Number the Stars (Essential Modern Classics)? Normally, ...'}]
+```
+
## Stream in a training loop
[`datasets.IterableDataset`] can be integrated into a training loop. First, shuffle the dataset:
| [
{
"components": [
{
"doc": "",
"lines": [
267,
331
],
"name": "FilteredExamplesIterable",
"signature": "class FilteredExamplesIterable(_BaseExamplesIterable):",
"type": "class"
},
{
"doc": "",
"lines": [
... | [
"tests/test_iterable_dataset.py::test_examples_iterable",
"tests/test_iterable_dataset.py::test_examples_iterable_with_kwargs",
"tests/test_iterable_dataset.py::test_examples_iterable_shuffle_data_sources",
"tests/test_iterable_dataset.py::test_examples_iterable_shuffle_shards_and_metadata",
"tests/test_ite... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add IterableDataset.filter
_Needs https://github.com/huggingface/datasets/pull/3801 to be merged first_
I added `IterableDataset.filter` with an API that is a subset of `Dataset.filter`:
```python
def filter(self, function, batched=False, batch_size=1000, with_indices=false, input_columns=None):
```
TODO:
- [x] tests
- [x] docs
related to https://github.com/huggingface/datasets/issues/3444 and https://github.com/huggingface/datasets/issues/3753
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/datasets/iterable_dataset.py]
(definition of FilteredExamplesIterable:)
class FilteredExamplesIterable(_BaseExamplesIterable):
(definition of FilteredExamplesIterable.__init__:)
def __init__( self, ex_iterable: _BaseExamplesIterable, function: Callable, with_indices: bool = False, input_columns: Optional[List[str]] = None, batched: bool = False, batch_size: int = 1000, ):
(definition of FilteredExamplesIterable.__iter__:)
def __iter__(self):
(definition of FilteredExamplesIterable.shuffle_data_sources:)
def shuffle_data_sources(self, seed: Optional[int]) -> "MappedExamplesIterable":
"""Shuffle the wrapped examples iterable."""
(definition of FilteredExamplesIterable.n_shards:)
def n_shards(self) -> int:
(definition of IterableDataset.filter:)
def filter( self, function: Optional[Callable] = None, with_indices=False, input_columns: Optional[Union[str, List[str]]] = None, batched: bool = False, batch_size: Optional[int] = 1000, ) -> "IterableDataset":
"""Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
The filtering is done on-the-fly when iterating over the dataset.
Args:
function (:obj:`Callable`): Callable with one of the following signatures:
- ``function(example: Union[Dict, Any]) -> bool`` if ``with_indices=False, batched=False``
- ``function(example: Union[Dict, Any], indices: int) -> bool`` if ``with_indices=True, batched=False``
- ``function(example: Union[Dict, Any]) -> List[bool]`` if ``with_indices=False, batched=True``
- ``function(example: Union[Dict, Any], indices: int) -> List[bool]`` if ``with_indices=True, batched=True``
If no function is provided, defaults to an always True function: ``lambda x: True``.
with_indices (:obj:`bool`, default `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
input_columns (:obj:`str` or `List[str]`, optional): The columns to be passed into `function` as
positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
batched (:obj:`bool`, defaults to `False`): Provide batch of examples to `function`
batch_size (:obj:`int`, optional, default ``1000``): Number of examples per batch provided to `function` if `batched=True`."""
[end of new definitions in src/datasets/iterable_dataset.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5142a8cf61d8a4495eda3d91dc4283a6df01ea14 | |
conan-io__conan-10713 | 10,713 | conan-io/conan | null | 3502138d1c339a911c6d1f1b7109c48eed230046 | 2022-03-03T16:58:53Z | diff --git a/conans/cli/commands/graph.py b/conans/cli/commands/graph.py
index 5dac91ffa56..e1b30914549 100644
--- a/conans/cli/commands/graph.py
+++ b/conans/cli/commands/graph.py
@@ -99,4 +99,11 @@ def graph_info(conan_api, parser, subparser, *args):
deps_graph, lockfile = graph_compute(args, conan_api, strict=args.lockfile_strict)
if not args.format:
print_graph_info(deps_graph, args.filter, args.package_filter)
+
+ if args.lockfile_out:
+ lockfile_out = make_abs_path(args.lockfile_out, os.getcwd())
+ ConanOutput().info(f"Saving lockfile: {lockfile_out}")
+ lockfile.save(lockfile_out)
+
return deps_graph, os.path.join(conan_api.cache_folder, "templates")
+
diff --git a/conans/cli/commands/lock.py b/conans/cli/commands/lock.py
index 223e147da77..1944e7432a1 100644
--- a/conans/cli/commands/lock.py
+++ b/conans/cli/commands/lock.py
@@ -5,6 +5,7 @@
from conans.cli.output import ConanOutput
from conans.errors import ConanException
from conans.model.graph_lock import Lockfile
+from conans.model.recipe_ref import RecipeReference
@conan_command(group=COMMAND_GROUPS['consumer'])
@@ -61,3 +62,40 @@ def lock_merge(conan_api, parser, subparser, *args):
lockfile_out = make_abs_path(args.lockfile_out)
result.save(lockfile_out)
ConanOutput().info("Generated lockfile: %s" % lockfile_out)
+
+
+@conan_subcommand()
+def lock_add(conan_api, parser, subparser, *args):
+ """
+ Add requires, build-requires or python-requires to existing or new lockfile. Resulting lockfile
+ will be ordereded, newer versions/revisions first.
+ References can be with our without revisions like "--requires=pkg/version", but they
+ must be package references, including at least the version, they cannot contain a version range
+ """
+ subparser.add_argument('--requires', action="append", help='Add references to lockfile.')
+ subparser.add_argument('--build-requires', action="append",
+ help='Add build-requires to lockfile')
+ subparser.add_argument('--python-requires', action="append",
+ help='Add python-requires to lockfile')
+
+ subparser.add_argument("--lockfile-out", action=OnceArgument,
+ help="Filename of the created lockfile")
+ subparser.add_argument("--lockfile", action=OnceArgument, help="Filename of the input lockfile")
+ args = parser.parse_args(*args)
+
+ if args.lockfile:
+ lockfile = make_abs_path(args.lockfile)
+ lockfile = Lockfile.load(lockfile)
+ else:
+ lockfile = Lockfile() # create a new lockfile
+ requires = [RecipeReference.loads(r) for r in args.requires] if args.requires else None
+ build_requires = [RecipeReference.loads(r) for r in args.build_requires] \
+ if args.build_requires else None
+ python_requires = [RecipeReference.loads(r) for r in args.python_requires] \
+ if args.python_requires else None
+
+ lockfile.add(requires=requires, build_requires=build_requires, python_requires=python_requires)
+
+ lockfile_out = make_abs_path(args.lockfile_out)
+ lockfile.save(lockfile_out)
+ ConanOutput().info("Generated lockfile: %s" % lockfile_out)
diff --git a/conans/model/graph_lock.py b/conans/model/graph_lock.py
index a4e1b32f5d0..6379d5dd8dd 100644
--- a/conans/model/graph_lock.py
+++ b/conans/model/graph_lock.py
@@ -49,6 +49,9 @@ def deserialize(data):
return result
def add(self, ref, package_ids=None):
+ # In case we have an existing, incomplete thing
+ pop_ref = RecipeReference.loads(str(ref))
+ self._requires.pop(pop_ref, None)
if package_ids is not None:
self._requires.setdefault(ref, {}).update(package_ids)
else:
@@ -65,6 +68,7 @@ def merge(self, other):
"""
:type other: _LockRequires
"""
+ # TODO: What happens when merging incomplete refs? Probably str(ref) should be used
for k, v in other._requires.items():
if k in self._requires:
if v is not None:
@@ -140,6 +144,17 @@ def merge(self, other):
self._build_requires.merge(other._build_requires)
self._python_requires.merge(other._python_requires)
+ def add(self, requires=None, build_requires=None, python_requires=None):
+ if requires:
+ for r in requires:
+ self._requires.add(r)
+ if build_requires:
+ for r in build_requires:
+ self._build_requires.add(r)
+ if python_requires:
+ for r in python_requires:
+ self._python_requires.add(r)
+
@staticmethod
def deserialize(data):
""" constructs a GraphLock from a json like dict
| diff --git a/conans/test/assets/genconanfile.py b/conans/test/assets/genconanfile.py
index 686f43d2573..69284f293a3 100644
--- a/conans/test/assets/genconanfile.py
+++ b/conans/test/assets/genconanfile.py
@@ -34,6 +34,7 @@ def __init__(self, name=None, version=None, new_import=False):
self._build_messages = None
self._requires = None
self._requirements = None
+ self._python_requires = None
self._build_requires = None
self._build_requirements = None
self._tool_requires = None
@@ -122,6 +123,13 @@ def with_build_requires(self, *refs):
self._build_requires.append(ref_str)
return self
+ def with_python_requires(self, *refs):
+ self._python_requires = self._python_requires or []
+ for ref in refs:
+ ref_str = self._get_full_ref_str(ref)
+ self._python_requires.append(ref_str)
+ return self
+
def with_tool_requires(self, *refs):
self._tool_requires = self._tool_requires or []
for ref in refs:
@@ -301,6 +309,12 @@ def _build_requires_render(self):
tmp = "build_requires = %s" % line
return tmp
+ @property
+ def _python_requires_render(self):
+ line = ", ".join(['"{}"'.format(r) for r in self._python_requires])
+ tmp = "python_requires = %s" % line
+ return tmp
+
@property
def _tool_requires_render(self):
line = ", ".join(['"{}"'.format(r) for r in self._tool_requires])
@@ -451,7 +465,7 @@ def __repr__(self):
for member in ("name", "version", "package_type", "provides", "deprecated",
"exports_sources", "exports", "generators", "requires", "build_requires",
- "tool_requires", "test_requires", "requirements",
+ "tool_requires", "test_requires", "requirements", "python_requires",
"revision_mode", "settings", "options", "default_options", "build",
"package_method", "package_info", "package_id_lines", "test_lines"
):
diff --git a/conans/test/integration/lockfile/test_lock_requires.py b/conans/test/integration/lockfile/test_lock_requires.py
index 8a0ee50f4c3..592c47bd725 100644
--- a/conans/test/integration/lockfile/test_lock_requires.py
+++ b/conans/test/integration/lockfile/test_lock_requires.py
@@ -76,6 +76,9 @@ def test_conanfile_txt_strict(requires):
assert_error=True)
assert "Requirement 'pkg/[>1.0]@user/testing' not in lockfile" in client.out
+ client.run("install consumer/conanfile.txt --lockfile=conan.lock")
+ assert "pkg/1.2@user/testing" in client.out
+
@pytest.mark.parametrize("requires", ["requires", "tool_requires"])
def test_conditional_os(requires):
diff --git a/conans/test/integration/lockfile/test_user_overrides.py b/conans/test/integration/lockfile/test_user_overrides.py
new file mode 100644
index 00000000000..5e46cb55bc4
--- /dev/null
+++ b/conans/test/integration/lockfile/test_user_overrides.py
@@ -0,0 +1,104 @@
+from conans.test.assets.genconanfile import GenConanfile
+from conans.test.utils.tools import TestClient
+
+
+def test_user_overrides():
+ """ Show that it is possible to add things to lockfiles, to pre-lock things explicitly from
+ user side
+ """
+ c = TestClient()
+ c.save({"math/conanfile.py": GenConanfile("math"),
+ "engine/conanfile.py": GenConanfile("engine", "1.0").with_requires("math/[*]"),
+ "game/conanfile.py": GenConanfile("game", "1.0").with_requires("engine/[*]")})
+
+ c.run("export math --version=1.0")
+ c.run("export math --version=1.1")
+ c.run("export math --version=1.2")
+ c.run("export engine")
+
+ c.run("graph info game")
+ assert "math/1.2" in c.out
+ assert "math/1.0" not in c.out
+
+ c.run("lock add --requires=math/1.0 --requires=unrelated/2.0 --lockfile-out=conan.lock")
+ c.run("graph info game --lockfile=conan.lock --lockfile-out=new.lock")
+ assert "math/1.0" in c.out
+ assert "math/1.2" not in c.out
+ # The resulting lockfile contains the full revision now
+ new_lock = c.load("new.lock")
+ assert "math/1.0#8e1a7a5ce869d8c54ae3d33468fd657" in new_lock
+
+ # Repeat for 1.1
+ c.run("lock add --requires=math/1.1 --requires=unrelated/2.0 --lockfile-out=conan.lock")
+ c.run("graph info game --lockfile=conan.lock --lockfile-out=new.lock")
+ assert "math/1.1" in c.out
+ assert "math/1.0" not in c.out
+ # The resulting lockfile contains the full revision now
+ new_lock = c.load("new.lock")
+ assert "math/1.1#8e1a7a5ce869d8c54ae3d33468fd657" in new_lock
+
+
+def test_user_build_overrides():
+ """ Test that it is possible to lock also build-requries
+ """
+ c = TestClient()
+ c.save({"cmake/conanfile.py": GenConanfile("cmake"),
+ "engine/conanfile.py": GenConanfile("engine", "1.0").with_build_requires("cmake/[*]")})
+
+ c.run("export cmake --version=1.0")
+ c.run("export cmake --version=1.1")
+ c.run("export cmake --version=1.2")
+
+ c.run("graph info engine")
+ assert "cmake/1.2" in c.out
+ assert "cmake/1.0" not in c.out
+
+ c.run("lock add --build-requires=cmake/1.0 --lockfile-out=conan.lock")
+ c.run("graph info engine --lockfile=conan.lock --lockfile-out=new.lock")
+ assert "cmake/1.0" in c.out
+ assert "cmake/1.2" not in c.out
+ # The resulting lockfile contains the full revision now
+ new_lock = c.load("new.lock")
+ assert "cmake/1.0" in new_lock
+
+ # Repeat for 1.1
+ c.run("lock add --build-requires=cmake/1.1 --lockfile-out=conan.lock")
+ c.run("graph info engine --lockfile=conan.lock --lockfile-out=new.lock")
+ assert "cmake/1.1" in c.out
+ assert "cmake/1.0" not in c.out
+ # The resulting lockfile contains the full revision now
+ new_lock = c.load("new.lock")
+ assert "cmake/1.1" in new_lock
+
+
+def test_user_python_overrides():
+ """ Test that it is possible to lock also python-requries
+ """
+ c = TestClient()
+ c.save({"pytool/conanfile.py": GenConanfile("pytool"),
+ "engine/conanfile.py": GenConanfile("engine", "1.0").with_python_requires("pytool/[*]")})
+
+ c.run("export pytool --version=1.0")
+ c.run("export pytool --version=1.1")
+ c.run("export pytool --version=1.2")
+
+ c.run("graph info engine")
+ assert "pytool/1.2" in c.out
+ assert "pytool/1.0" not in c.out
+
+ c.run("lock add --python-requires=pytool/1.0 --lockfile-out=conan.lock")
+ c.run("graph info engine --lockfile=conan.lock --lockfile-out=new.lock")
+ assert "pytool/1.0" in c.out
+ assert "pytool/1.2" not in c.out
+ # The resulting lockfile contains the full revision now
+ new_lock = c.load("new.lock")
+ assert "pytool/1.0" in new_lock
+
+ # Repeat for 1.1
+ c.run("lock add --python-requires=pytool/1.1 --lockfile-out=conan.lock")
+ c.run("graph info engine --lockfile=conan.lock --lockfile-out=new.lock")
+ assert "pytool/1.1" in c.out
+ assert "pytool/1.0" not in c.out
+ # The resulting lockfile contains the full revision now
+ new_lock = c.load("new.lock")
+ assert "pytool/1.1" in new_lock
| [
{
"components": [
{
"doc": "Add requires, build-requires or python-requires to existing or new lockfile. Resulting lockfile\nwill be ordereded, newer versions/revisions first.\nReferences can be with our without revisions like \"--requires=pkg/version\", but they\nmust be package references, inclu... | [
"conans/test/integration/lockfile/test_user_overrides.py::test_user_overrides",
"conans/test/integration/lockfile/test_user_overrides.py::test_user_build_overrides",
"conans/test/integration/lockfile/test_user_overrides.py::test_user_python_overrides"
] | [
"conans/test/integration/lockfile/test_lock_requires.py::test_conanfile_txt_deps_ranges[requires]",
"conans/test/integration/lockfile/test_lock_requires.py::test_conanfile_txt_deps_ranges[tool_requires]",
"conans/test/integration/lockfile/test_lock_requires.py::test_conanfile_txt_deps_ranges_transitive[requires... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
[develop2] proof of concept of adding overrides to lockfiles
Via ``conan lock add`` new command
Close https://github.com/conan-io/conan/pull/10046
Close https://github.com/conan-io/conan/issues/9912
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/cli/commands/lock.py]
(definition of lock_add:)
def lock_add(conan_api, parser, subparser, *args):
"""Add requires, build-requires or python-requires to existing or new lockfile. Resulting lockfile
will be ordereded, newer versions/revisions first.
References can be with our without revisions like "--requires=pkg/version", but they
must be package references, including at least the version, they cannot contain a version range"""
[end of new definitions in conans/cli/commands/lock.py]
[start of new definitions in conans/model/graph_lock.py]
(definition of Lockfile.add:)
def add(self, requires=None, build_requires=None, python_requires=None):
[end of new definitions in conans/model/graph_lock.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-10659 | 10,659 | conan-io/conan | null | cbbb28978195502cbc43c1f6a8dcafaa91ebb3b6 | 2022-02-24T16:17:26Z | diff --git a/conan/tools/cmake/layout.py b/conan/tools/cmake/layout.py
index 505b077d526..79a89dd60a8 100644
--- a/conan/tools/cmake/layout.py
+++ b/conan/tools/cmake/layout.py
@@ -3,7 +3,7 @@
from conans.errors import ConanException
-def cmake_layout(conanfile, generator=None):
+def cmake_layout(conanfile, generator=None, src_folder="."):
gen = conanfile.conf.get("tools.cmake.cmaketoolchain:generator", default=generator)
if gen:
multi = "Visual" in gen or "Xcode" in gen or "Multi-Config" in gen
@@ -14,7 +14,7 @@ def cmake_layout(conanfile, generator=None):
else:
multi = False
- conanfile.folders.source = "."
+ conanfile.folders.source = src_folder
try:
build_type = str(conanfile.settings.build_type)
except ConanException:
@@ -27,7 +27,8 @@ def cmake_layout(conanfile, generator=None):
conanfile.folders.build = "cmake-build-{}".format(build_type)
conanfile.folders.generators = os.path.join(conanfile.folders.build, "conan")
- conanfile.cpp.source.includedirs = ["src"]
+ conanfile.cpp.source.includedirs = ["include"]
+
if multi:
conanfile.cpp.build.libdirs = ["{}".format(build_type)]
conanfile.cpp.build.bindirs = ["{}".format(build_type)]
diff --git a/conan/tools/layout/__init__.py b/conan/tools/layout/__init__.py
index 50da54c6a0d..3946cf958ee 100644
--- a/conan/tools/layout/__init__.py
+++ b/conan/tools/layout/__init__.py
@@ -1,3 +1,14 @@
+import os
# FIXME: Temporary fixes to avoid breaking 1.45, to be removed 2.0
from conan.tools.cmake import cmake_layout, clion_layout
from conan.tools.microsoft import vs_layout
+
+
+def basic_layout(conanfile, src_folder="."):
+ conanfile.folders.build = "build"
+ if conanfile.settings.get_safe("build_type"):
+ conanfile.folders.build += "-{}".format(str(conanfile.settings.build_type).lower())
+ conanfile.folders.generators = os.path.join(conanfile.folders.build, "conan")
+ conanfile.cpp.build.bindirs = ["."]
+ conanfile.cpp.build.libdirs = ["."]
+ conanfile.folders.source = src_folder
diff --git a/conan/tools/meson/__init__.py b/conan/tools/meson/__init__.py
index 0da85a41394..9f317238a9c 100644
--- a/conan/tools/meson/__init__.py
+++ b/conan/tools/meson/__init__.py
@@ -1,3 +1,3 @@
from conan.tools.meson.toolchain import MesonToolchain
from conan.tools.meson.meson import Meson
-from conan.tools.meson.layout import meson_layout
+
diff --git a/conan/tools/meson/layout.py b/conan/tools/meson/layout.py
deleted file mode 100644
index becdb3b2037..00000000000
--- a/conan/tools/meson/layout.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import os
-
-
-def meson_layout(conanfile):
- conanfile.folders.build = "build-{}".format(str(conanfile.settings.build_type).lower())
- conanfile.folders.generators = os.path.join(conanfile.folders.build, "conan")
- conanfile.cpp.build.bindirs = ["."]
- conanfile.cpp.build.libdirs = ["."]
diff --git a/conans/assets/templates/new_v2_cmake.py b/conans/assets/templates/new_v2_cmake.py
index cbea26fade9..7515d921fc2 100644
--- a/conans/assets/templates/new_v2_cmake.py
+++ b/conans/assets/templates/new_v2_cmake.py
@@ -19,7 +19,7 @@ class {package_name}Conan(ConanFile):
default_options = {{"shared": False, "fPIC": True}}
# Sources are located in the same place as this recipe, copy them to the recipe
- exports_sources = "CMakeLists.txt", "src/*"
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
def config_options(self):
if self.settings.os == "Windows":
@@ -88,8 +88,9 @@ def test(self):
project({name} CXX)
add_library({name} src/{name}.cpp)
+target_include_directories({name} PUBLIC include)
-set_target_properties({name} PROPERTIES PUBLIC_HEADER "src/{name}.h")
+set_target_properties({name} PROPERTIES PUBLIC_HEADER "include/{name}.h")
install(TARGETS {name} DESTINATION "."
PUBLIC_HEADER DESTINATION include
RUNTIME DESTINATION bin
@@ -213,7 +214,7 @@ def get_cmake_lib_files(name, version, package_name="Pkg"):
files = {"conanfile.py": conanfile_sources_v2.format(name=name, version=version,
package_name=package_name),
"src/{}.cpp".format(name): source_cpp.format(name=name, version=version),
- "src/{}.h".format(name): source_h.format(name=name, version=version),
+ "include/{}.h".format(name): source_h.format(name=name, version=version),
"CMakeLists.txt": cmake_v2.format(name=name, version=version),
"test_package/conanfile.py": test_conanfile_v2.format(name=name,
version=version,
@@ -242,7 +243,7 @@ class {package_name}Conan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# Sources are located in the same place as this recipe, copy them to the recipe
- exports_sources = "CMakeLists.txt", "src/*"
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
def layout(self):
cmake_layout(self)
@@ -265,6 +266,7 @@ def package(self):
project({name} CXX)
add_executable({name} src/{name}.cpp src/main.cpp)
+target_include_directories({name} PUBLIC include)
install(TARGETS {name} DESTINATION "."
RUNTIME DESTINATION bin
@@ -294,7 +296,7 @@ def get_cmake_exe_files(name, version, package_name="Pkg"):
files = {"conanfile.py": conanfile_exe.format(name=name, version=version,
package_name=package_name),
"src/{}.cpp".format(name): source_cpp.format(name=name, version=version),
- "src/{}.h".format(name): source_h.format(name=name, version=version),
+ "include/{}.h".format(name): source_h.format(name=name, version=version),
"src/main.cpp": test_main.format(name=name),
"CMakeLists.txt": cmake_exe_v2.format(name=name, version=version),
"test_package/conanfile.py": test_conanfile_exe_v2.format(name=name,
diff --git a/conans/assets/templates/new_v2_meson.py b/conans/assets/templates/new_v2_meson.py
index 1de66f348ca..e0e962070b3 100644
--- a/conans/assets/templates/new_v2_meson.py
+++ b/conans/assets/templates/new_v2_meson.py
@@ -2,7 +2,8 @@
conanfile_sources_v2 = """import os
from conan import ConanFile
-from conan.tools.meson import MesonToolchain, Meson, meson_layout
+from conan.tools.meson import MesonToolchain, Meson
+from conan.tools.layout import basic_layout
from conan.tools.files import copy
class {package_name}Conan(ConanFile):
@@ -22,7 +23,7 @@ def config_options(self):
del self.options.fPIC
def layout(self):
- meson_layout(self)
+ basic_layout(self)
def generate(self):
tc = MesonToolchain(self)
@@ -50,7 +51,8 @@ def package_info(self):
test_conanfile_v2 = """import os
from conan import ConanFile
from conans import tools
-from conan.tools.meson import MesonToolchain, Meson, meson_layout
+from conan.tools.meson import MesonToolchain, Meson
+from conan.tools.layout import basic_layout
class {package_name}TestConan(ConanFile):
@@ -66,7 +68,7 @@ def build(self):
meson.build()
def layout(self):
- meson_layout(self)
+ basic_layout(self)
def test(self):
if not tools.cross_building(self):
| diff --git a/conans/test/assets/cmake.py b/conans/test/assets/cmake.py
index 687c5cedaa7..08cd07813d1 100644
--- a/conans/test/assets/cmake.py
+++ b/conans/test/assets/cmake.py
@@ -33,6 +33,7 @@ def gen_cmakelists(language="CXX", verify=True, project="project", libname="myli
{% if libsources %}
add_library({{libname}} {{libtype}} {% for s in libsources %} {{s}} {% endfor %})
+ target_include_directories({{libname}} PUBLIC "include")
{% endif %}
{% if libsources and find_package %}
@@ -49,6 +50,7 @@ def gen_cmakelists(language="CXX", verify=True, project="project", libname="myli
{% if appsources %}
add_executable({{appname}} {% for s in appsources %} {{s}} {% endfor %})
+ target_include_directories({{appname}} PUBLIC "include")
{% endif %}
{% if appsources and libsources %}
diff --git a/conans/test/assets/pkg_cmake.py b/conans/test/assets/pkg_cmake.py
index 4bd5a3f8b31..60d53943c2c 100644
--- a/conans/test/assets/pkg_cmake.py
+++ b/conans/test/assets/pkg_cmake.py
@@ -17,7 +17,7 @@ def pkg_cmake(name, version, requires=None, exe=False):
class Pkg(ConanFile):
name = "{pkg_name}"
version = "{version}"
- exports_sources = "CMakeLists.txt", "src/*"
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
{deps}
settings = "os", "compiler", "arch", "build_type"
options = {{"shared": [True, False]}}
@@ -33,7 +33,7 @@ def build(self):
cmake.build()
def package(self):
- self.copy("*.h", dst="include", src="src")
+ self.copy("*.h", dst="include", src="include")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
@@ -53,7 +53,7 @@ def package_info(self):
src = gen_function_cpp(name=name, includes=deps, calls=deps)
deps = [r.name for r in refs]
- files = {"src/{}.h".format(name): hdr,
+ files = {"include/{}.h".format(name): hdr,
"src/{}.cpp".format(name): src,
"conanfile.py": conanfile}
if exe:
diff --git a/conans/test/functional/layout/test_build_system_layout_helpers.py b/conans/test/functional/layout/test_build_system_layout_helpers.py
index 4bb730ae1ab..767e1bb6923 100644
--- a/conans/test/functional/layout/test_build_system_layout_helpers.py
+++ b/conans/test/functional/layout/test_build_system_layout_helpers.py
@@ -3,11 +3,12 @@
import textwrap
import pytest
+import six
from conans.model.ref import ConanFileReference
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient, TurboTestClient
-from conans.util.files import save
+from conans.util.files import save, load
@pytest.fixture
@@ -146,3 +147,149 @@ def layout(self):
client.save({"conanfile.py": conanfile})
client.run('install .', assert_error=True)
assert " 'build_type' setting not defined, it is necessary for cmake_layout()" in client.out
+
+
+@pytest.mark.skipif(six.PY2, reason="only Py3")
+def test_cmake_layout_external_sources():
+ conanfile = textwrap.dedent("""
+ import os
+ from conans import ConanFile
+ from conan.tools.cmake import cmake_layout
+ from conan.tools.files import save, copy, load
+ class Pkg(ConanFile):
+ settings = "os", "build_type"
+ exports_sources = "exported.txt"
+
+ def layout(self):
+ cmake_layout(self, src_folder="src")
+
+ def generate(self):
+ save(self, "generate.txt", "generate")
+
+ def source(self):
+ save(self, "source.txt", "foo")
+
+ def build(self):
+ c1 = load(self, os.path.join(self.source_folder, "source.txt"))
+ c2 = load(self, os.path.join(self.source_folder, "..", "exported.txt"))
+ save(self, "build.txt", c1 + c2)
+
+ def package(self):
+ copy(self, "build.txt", self.build_folder, os.path.join(self.package_folder, "res"))
+ """)
+
+ client = TestClient()
+ client.save({"conanfile.py": conanfile, "exported.txt": "exported_contents"})
+ client.run("create . foo/1.0@ -s os=Linux")
+ assert "Packaged 1 '.txt' file: build.txt" in client.out
+
+ # Local flow
+ client.run("install . foo/1.0 -s os=Linux")
+ assert os.path.exists(os.path.join(client.current_folder, "cmake-build-release", "conan", "generate.txt"))
+ client.run("source .")
+ assert os.path.exists(os.path.join(client.current_folder, "src", "source.txt"))
+ client.run("build .")
+ contents = load(os.path.join(client.current_folder, "cmake-build-release", "build.txt"))
+ assert contents == "fooexported_contents"
+ client.run("export-pkg . foo/1.0@ --force")
+ assert "Packaged 1 '.txt' file: build.txt" in client.out
+
+
+@pytest.mark.skipif(six.PY2, reason="only Py3")
+@pytest.mark.parametrize("with_build_type", [True, False])
+def test_basic_layout_external_sources(with_build_type):
+ conanfile = textwrap.dedent("""
+ import os
+ from conans import ConanFile
+ from conan.tools.layout import basic_layout
+ from conan.tools.files import save, load, copy
+ class Pkg(ConanFile):
+ settings = "os", "compiler", "arch"{}
+ exports_sources = "exported.txt"
+
+ def layout(self):
+ basic_layout(self, src_folder="src")
+
+ def generate(self):
+ save(self, "generate.txt", "generate")
+
+ def source(self):
+ save(self, "source.txt", "foo")
+
+ def build(self):
+ c1 = load(self, os.path.join(self.source_folder, "source.txt"))
+ c2 = load(self, os.path.join(self.source_folder, "..", "exported.txt"))
+ save(self, "build.txt", c1 + c2)
+
+ def package(self):
+ copy(self, "build.txt", self.build_folder, os.path.join(self.package_folder, "res"))
+ """)
+ if with_build_type:
+ conanfile = conanfile.format(', "build_type"')
+ else:
+ conanfile = conanfile.format("")
+ client = TestClient()
+ client.save({"conanfile.py": conanfile, "exported.txt": "exported_contents"})
+ client.run("create . foo/1.0@ -s os=Linux")
+ assert "Packaged 1 '.txt' file: build.txt" in client.out
+
+ # Local flow
+ build_folder = "build-release" if with_build_type else "build"
+ client.run("install . foo/1.0 -s os=Linux")
+ assert os.path.exists(os.path.join(client.current_folder, build_folder, "conan", "generate.txt"))
+ client.run("source .")
+ assert os.path.exists(os.path.join(client.current_folder, "src", "source.txt"))
+ client.run("build .")
+ contents = load(os.path.join(client.current_folder, build_folder, "build.txt"))
+ assert contents == "fooexported_contents"
+ client.run("export-pkg . foo/1.0@ --force")
+ assert "Packaged 1 '.txt' file: build.txt" in client.out
+
+
+@pytest.mark.skipif(six.PY2, reason="only Py3")
+@pytest.mark.parametrize("with_build_type", [True, False])
+def test_basic_layout_no_external_sources(with_build_type):
+ conanfile = textwrap.dedent("""
+ import os
+ from conans import ConanFile
+ from conan.tools.layout import basic_layout
+ from conan.tools.files import save, load, copy
+ class Pkg(ConanFile):
+ settings = "os", "compiler", "arch"{}
+ exports_sources = "exported.txt"
+
+ def layout(self):
+ basic_layout(self)
+
+ def generate(self):
+ save(self, "generate.txt", "generate")
+
+ def build(self):
+ contents = load(self, os.path.join(self.source_folder, "exported.txt"))
+ save(self, "build.txt", contents)
+
+ def package(self):
+ copy(self, "build.txt", self.build_folder, os.path.join(self.package_folder,
+ "res"))
+ """)
+ if with_build_type:
+ conanfile = conanfile.format(', "build_type"')
+ else:
+ conanfile = conanfile.format("")
+
+ client = TestClient()
+ client.save({"conanfile.py": conanfile, "exported.txt": "exported_contents"})
+ client.run("create . foo/1.0@ -s os=Linux")
+ assert "Packaged 1 '.txt' file: build.txt" in client.out
+
+ # Local flow
+ client.run("install . foo/1.0 -s os=Linux")
+
+ build_folder = "build-release" if with_build_type else "build"
+ assert os.path.exists(os.path.join(client.current_folder, build_folder, "conan", "generate.txt"))
+
+ client.run("build .")
+ contents = load(os.path.join(client.current_folder, build_folder, "build.txt"))
+ assert contents == "exported_contents"
+ client.run("export-pkg . foo/1.0@ --force")
+ assert "Packaged 1 '.txt' file: build.txt" in client.out
diff --git a/conans/test/functional/toolchains/cmake/cmakedeps/test_cmakedeps_and_linker_flags.py b/conans/test/functional/toolchains/cmake/cmakedeps/test_cmakedeps_and_linker_flags.py
index f609dad60ad..9ddb8172807 100644
--- a/conans/test/functional/toolchains/cmake/cmakedeps/test_cmakedeps_and_linker_flags.py
+++ b/conans/test/functional/toolchains/cmake/cmakedeps/test_cmakedeps_and_linker_flags.py
@@ -26,7 +26,7 @@ class HelloConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = {"shared": False}
- exports_sources = "CMakeLists.txt", "src/*"
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
generators = "CMakeDeps", "CMakeToolchain"
def layout(self):
diff --git a/conans/test/functional/toolchains/cmake/cmakedeps/test_weird_library_names.py b/conans/test/functional/toolchains/cmake/cmakedeps/test_weird_library_names.py
index c81ec0b2df2..b41659ac3f0 100644
--- a/conans/test/functional/toolchains/cmake/cmakedeps/test_weird_library_names.py
+++ b/conans/test/functional/toolchains/cmake/cmakedeps/test_weird_library_names.py
@@ -73,7 +73,7 @@ def test_cmake_find_package(client_weird_lib_name):
from conans import ConanFile, CMake
class Pkg(ConanFile):
- exports_sources = "CMakeLists.txt", "src/*"
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
settings = "os", "compiler", "arch", "build_type"
generators = "cmake_find_package"
requires = "hello/0.1"
@@ -99,7 +99,7 @@ def test_cmake_find_package_multi(client_weird_lib_name):
from conans import ConanFile, CMake
class Pkg(ConanFile):
- exports_sources = "CMakeLists.txt", "src/*"
+ exports_sources = "CMakeLists.txt", "src/*", "include/*"
settings = "os", "compiler", "arch", "build_type"
generators = "cmake_find_package_multi"
requires = "hello/0.1"
| [
{
"components": [
{
"doc": "",
"lines": [
7,
14
],
"name": "basic_layout",
"signature": "def basic_layout(conanfile, src_folder=\".\"):",
"type": "function"
}
],
"file": "conan/tools/layout/__init__.py"
}
] | [
"conans/test/functional/layout/test_build_system_layout_helpers.py::test_cmake_layout_external_sources",
"conans/test/functional/layout/test_build_system_layout_helpers.py::test_basic_layout_external_sources[True]",
"conans/test/functional/layout/test_build_system_layout_helpers.py::test_basic_layout_external_s... | [
"conans/test/functional/layout/test_build_system_layout_helpers.py::test_layout_in_cache[clion_layout-Debug-x86_64]",
"conans/test/functional/layout/test_build_system_layout_helpers.py::test_layout_in_cache[clion_layout-Debug-x86]",
"conans/test/functional/layout/test_build_system_layout_helpers.py::test_layout... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
basic_layout and external_sources for cmake_layout, removed meson_layout
Changelog: Feature: Added `basic_layout`, removed `meson_layout` and added argument `src_folder` to `cmake_layout`as a shortcut for adjusting `conanfile.folders.source`.
Docs: https://github.com/conan-io/docs/pull/2426
Close https://github.com/conan-io/conan/issues/10645
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/layout/__init__.py]
(definition of basic_layout:)
def basic_layout(conanfile, src_folder="."):
[end of new definitions in conan/tools/layout/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-10654 | 10,654 | conan-io/conan | null | 280c77286697db08be368599daa1c828148d04d1 | 2022-02-24T11:53:52Z | diff --git a/conans/client/cmd/build.py b/conans/client/cmd/build.py
index ee215bf2a57..6cda8eb6c04 100644
--- a/conans/client/cmd/build.py
+++ b/conans/client/cmd/build.py
@@ -45,12 +45,7 @@ def cmd_build(app, conanfile_path, base_path, source_folder, build_folder, packa
# Only base_path and conanfile_path will remain
if hasattr(conan_file, "layout"):
conanfile_folder = os.path.dirname(conanfile_path)
- conan_file.folders.set_base_build(layout_build_folder or conanfile_folder)
- conan_file.folders.set_base_source(layout_source_folder or conanfile_folder)
- conan_file.folders.set_base_package(layout_build_folder or conanfile_folder)
- conan_file.folders.set_base_generators(layout_build_folder or conanfile_folder)
- conan_file.folders.set_base_install(layout_build_folder or conanfile_folder)
- conan_file.folders.set_base_imports(layout_build_folder or conanfile_folder)
+ conan_file.folders.set_base_folders(conanfile_folder, layout_build_folder)
else:
conan_file.folders.set_base_build(build_folder)
conan_file.folders.set_base_source(source_folder)
diff --git a/conans/client/cmd/export_pkg.py b/conans/client/cmd/export_pkg.py
index bdc1090f6ab..d39d4850271 100644
--- a/conans/client/cmd/export_pkg.py
+++ b/conans/client/cmd/export_pkg.py
@@ -59,11 +59,8 @@ def _init_conanfile_infos():
conanfile.develop = True
if hasattr(conanfile, "layout"):
conanfile_folder = os.path.dirname(source_conanfile_path)
- conanfile.folders.set_base_build(conanfile_folder)
- conanfile.folders.set_base_source(conanfile_folder)
+ conanfile.folders.set_base_folders(conanfile_folder, output_folder=None)
conanfile.folders.set_base_package(dest_package_folder)
- conanfile.folders.set_base_install(conanfile_folder)
- conanfile.folders.set_base_generators(conanfile_folder)
else:
conanfile.folders.set_base_build(build_folder)
conanfile.folders.set_base_source(source_folder)
diff --git a/conans/client/installer.py b/conans/client/installer.py
index 1ae644cc9bb..68404739d1d 100644
--- a/conans/client/installer.py
+++ b/conans/client/installer.py
@@ -467,12 +467,7 @@ def _handle_node_editable(self, node, profile_host, profile_build, graph_lock):
base_path = package_layout.base_folder()
if hasattr(conanfile, "layout"):
- conanfile.folders.set_base_package(package_layout.output_folder or base_path)
- conanfile.folders.set_base_source(base_path)
- conanfile.folders.set_base_build(package_layout.output_folder or base_path)
- conanfile.folders.set_base_generators(package_layout.output_folder or base_path)
- conanfile.folders.set_base_install(base_path)
- conanfile.folders.set_base_imports(package_layout.output_folder or base_path)
+ conanfile.folders.set_base_folders(base_path, package_layout.output_folder)
else:
conanfile.folders.set_base_package(base_path)
conanfile.folders.set_base_source(None)
diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py
index 08d9fe9775e..f8a04e404fc 100644
--- a/conans/model/conan_file.py
+++ b/conans/model/conan_file.py
@@ -254,6 +254,15 @@ def new_cpp_info(self):
def source_folder(self):
return self.folders.source_folder
+ @property
+ def base_source_folder(self):
+ """ returns the base_source folder, that is the containing source folder in the cache
+ irrespective of the layout() and where the final self.source_folder (computed with the
+ layout()) points.
+ This can be necessary in the source() or build() methods to locate where exported sources
+ are, like patches or entire files that will be used to complete downloaded sources"""
+ return self.folders._base_source
+
@property
def build_folder(self):
return self.folders.build_folder
diff --git a/conans/model/layout.py b/conans/model/layout.py
index 9b20d7ce77b..4d82392fd12 100644
--- a/conans/model/layout.py
+++ b/conans/model/layout.py
@@ -26,10 +26,35 @@ def __init__(self):
self.package = ""
self.generators = ""
self.imports = ""
+ # Relative location of the project root, if the conanfile is not in that project root, but
+ # in a subfolder: e.g: If the conanfile is in a subfolder then self.root = ".."
+ self.root = None
def __repr__(self):
return str(self.__dict__)
+ def set_base_folders(self, conanfile_folder, output_folder):
+ """ this methods can be used for defining all the base folders in the
+ local flow (conan install, source, build), where only the current conanfile location
+ and the potential --output-folder user argument are the folders to take into account
+ If the "layout()" method defines a self.folders.root = "xxx" it will be used to compute
+ the base folder
+
+ @param conanfile_folder: the location where the current consumer conanfile is
+ @param output_folder: Can potentially be None (for export-pkg: TODO), in that case
+ the conanfile location is used"""
+ # This must be called only after ``layout()`` has been called
+ base_folder = conanfile_folder if self.root is None else \
+ os.path.normpath(os.path.join(conanfile_folder, self.root))
+
+ self._base_source = base_folder
+
+ self._base_install = output_folder or base_folder
+ self._base_build = output_folder or base_folder
+ self._base_package = output_folder or base_folder
+ self._base_generators = output_folder or base_folder
+ self._base_imports = output_folder or base_folder
+
@property
def source_folder(self):
if self._base_source is None:
| diff --git a/conans/test/functional/layout/test_exports_sources.py b/conans/test/functional/layout/test_exports_sources.py
new file mode 100644
index 00000000000..6c08b2c6e11
--- /dev/null
+++ b/conans/test/functional/layout/test_exports_sources.py
@@ -0,0 +1,58 @@
+import textwrap
+
+from conans.test.utils.tools import TestClient
+
+
+def test_exports_sources_patch():
+ """
+ tests that using ``self.base_source_folder`` we can access both from the source() and build()
+ methods the folder where the exported sources (patches, new build files) are. And maintain
+ a local flow without exports copies
+ """
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ import os, shutil
+ from conan import ConanFile
+ from conan.tools.files import load, copy, save
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ exports_sources = "CMakeLists.txt", "patches*"
+
+ def layout(self):
+ self.folders.source = "src"
+
+ def source(self):
+ save(self, "CMakeLists.txt", "old, bad") # EMULATE A DOWNLOAD!
+ base_source = self.base_source_folder
+ mypatch = load(self, os.path.join(base_source, "patches/mypatch"))
+ self.output.info("MYPATCH-SOURCE {}".format(mypatch))
+ shutil.copy(os.path.join(base_source, "CMakeLists.txt"),
+ "CMakeLists.txt")
+
+ def build(self):
+ path = os.path.join(self.source_folder, "CMakeLists.txt")
+ cmake = load(self, path)
+ self.output.info("MYCMAKE-BUILD: {}".format(cmake))
+ path = os.path.join(self.base_source_folder, "patches/mypatch")
+ cmake = load(self, path)
+ self.output.info("MYPATCH-BUILD: {}".format(cmake))
+ """)
+ c.save({"conanfile.py": conanfile,
+ "patches/mypatch": "mypatch!",
+ "CMakeLists.txt": "mycmake!"})
+ c.run("create .")
+ assert "pkg/0.1: MYPATCH-SOURCE mypatch!" in c.out
+ assert "pkg/0.1: MYCMAKE-BUILD: mycmake!" in c.out
+ assert "pkg/0.1: MYPATCH-BUILD: mypatch!" in c.out
+
+ # Local flow
+ c.run("install .")
+ c.run("source .")
+ assert "conanfile.py (pkg/0.1): MYPATCH-SOURCE mypatch!" in c.out
+ assert c.load("CMakeLists.txt") == "mycmake!" # My original one
+ assert c.load("src/CMakeLists.txt") == "mycmake!" # The one patched by "source()"
+ c.run("build .")
+ assert "conanfile.py (pkg/0.1): MYCMAKE-BUILD: mycmake!" in c.out
+ assert "conanfile.py (pkg/0.1): MYPATCH-BUILD: mypatch!" in c.out
diff --git a/conans/test/functional/layout/test_in_subfolder.py b/conans/test/functional/layout/test_in_subfolder.py
new file mode 100644
index 00000000000..d3f44695f6d
--- /dev/null
+++ b/conans/test/functional/layout/test_in_subfolder.py
@@ -0,0 +1,49 @@
+import textwrap
+
+from conans.test.utils.tools import TestClient
+
+
+def test_exports_sources_own_code_in_subfolder():
+ """ test that we can put the conanfile in a subfolder, and it can work. The key is
+ the exports_sources() method that can do:
+ os.path.join(self.recipe_folder, "..")
+ And the layout: self.folders.root = ".."
+ """
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.files import load, copy
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def layout(self):
+ self.folders.root = ".."
+ self.folders.source = "."
+ self.folders.build = "build"
+
+ def export_sources(self):
+ source_folder = os.path.join(self.recipe_folder, "..")
+ copy(self, "*.txt", source_folder, self.export_sources_folder)
+
+ def source(self):
+ cmake = load(self, "CMakeLists.txt")
+ self.output.info("MYCMAKE-SRC: {}".format(cmake))
+
+ def build(self):
+ path = os.path.join(self.source_folder, "CMakeLists.txt")
+ cmake = load(self, path)
+ self.output.info("MYCMAKE-BUILD: {}".format(cmake))
+ """)
+ c.save({"conan/conanfile.py": conanfile,
+ "CMakeLists.txt": "mycmake!"})
+ c.run("create conan")
+ assert "pkg/0.1: MYCMAKE-SRC: mycmake!" in c.out
+ assert "pkg/0.1: MYCMAKE-BUILD: mycmake!" in c.out
+
+ # Local flow
+ c.run("install conan")
+ # SOURCE NOT CALLED! It doesnt make sense (will fail due to local exports)
+ c.run("build conan")
+ assert "conanfile.py (pkg/0.1): MYCMAKE-BUILD: mycmake!" in c.out
| [
{
"components": [
{
"doc": "returns the base_source folder, that is the containing source folder in the cache\nirrespective of the layout() and where the final self.source_folder (computed with the\nlayout()) points.\nThis can be necessary in the source() or build() methods to locate where exporte... | [
"conans/test/functional/layout/test_exports_sources.py::test_exports_sources_patch",
"conans/test/functional/layout/test_in_subfolder.py::test_exports_sources_own_code_in_subfolder"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
proposing base_source_folder, and folders.root
Changelog: Feature: Adding ``self.base_source_folder`` for ``exports_sources`` explicit layouts.
Changelog: Feature: Adding ``root`` to layout model to allow conanfile.py in subfolders.
Docs: https://github.com/conan-io/docs/pull/2418
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/model/conan_file.py]
(definition of ConanFile.base_source_folder:)
def base_source_folder(self):
"""returns the base_source folder, that is the containing source folder in the cache
irrespective of the layout() and where the final self.source_folder (computed with the
layout()) points.
This can be necessary in the source() or build() methods to locate where exported sources
are, like patches or entire files that will be used to complete downloaded sources"""
[end of new definitions in conans/model/conan_file.py]
[start of new definitions in conans/model/layout.py]
(definition of Folders.set_base_folders:)
def set_base_folders(self, conanfile_folder, output_folder):
"""this methods can be used for defining all the base folders in the
local flow (conan install, source, build), where only the current conanfile location
and the potential --output-folder user argument are the folders to take into account
If the "layout()" method defines a self.folders.root = "xxx" it will be used to compute
the base folder
@param conanfile_folder: the location where the current consumer conanfile is
@param output_folder: Can potentially be None (for export-pkg: TODO), in that case
the conanfile location is used"""
[end of new definitions in conans/model/layout.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
joke2k__faker-1621 | 1,621 | joke2k/faker | null | 805f2a1661b93da8a73f6a1112903b429e92e8a0 | 2022-02-23T11:57:18Z | diff --git a/faker/providers/lorem/fa_IR/__init__.py b/faker/providers/lorem/fa_IR/__init__.py
new file mode 100644
index 0000000000..f848d1d8d1
--- /dev/null
+++ b/faker/providers/lorem/fa_IR/__init__.py
@@ -0,0 +1,925 @@
+from .. import Provider as LoremProvider
+
+
+class Provider(LoremProvider):
+ """Implement lorem provider for ``fa_IR`` locale.
+
+ Word list is based on the source(s) below, and some words have been removed
+ to make the word list appropriate for public testing.
+
+ Sources:
+
+ - https://1000mostcommonwords.com/1000-most-common-persian-words/
+ """
+
+ word_list = (
+ "عنوان",
+ "من",
+ "خود",
+ "که",
+ "او",
+ "بود",
+ "برای",
+ "در",
+ "با",
+ "آنها",
+ "بودن",
+ "در",
+ "یک",
+ "دارند",
+ "این",
+ "از",
+ "توسط",
+ "داغ",
+ "کلمه",
+ "اما",
+ "چه",
+ "برخی",
+ "است",
+ "آن",
+ "شما",
+ "یا",
+ "حال",
+ "تر",
+ "از",
+ "به",
+ "و",
+ "دست",
+ "در",
+ "ما",
+ "میتوانید",
+ "از",
+ "دیگر",
+ "بود",
+ "که",
+ "انجام",
+ "شان",
+ "زمان",
+ "اگر",
+ "خواهدشد",
+ "چگونه",
+ "گفت:",
+ "پا",
+ "هر",
+ "بگو",
+ "میکند",
+ "مجموعه",
+ "سه",
+ "میخواهم",
+ "هوا",
+ "خوبی",
+ "همچنین",
+ "بازی",
+ "کوچک",
+ "پایان",
+ "قراردادن",
+ "خانه",
+ "بهعنوان",
+ "دست",
+ "بندر",
+ "بزرگ",
+ "طلسم",
+ "اضافه",
+ "حتی",
+ "زمین",
+ "اینجا",
+ "باید",
+ "بزرگ",
+ "بالا",
+ "ازجمله",
+ "دنبال",
+ "عمل",
+ "بپرسید",
+ "مردها",
+ "تغییر",
+ "رفت",
+ "نور",
+ "نوع",
+ "خاموش",
+ "نیاز",
+ "خانه",
+ "تصویر",
+ "سعیکنید",
+ "ما",
+ "دوباره",
+ "حیوانات",
+ "نقطه",
+ "مادر",
+ "جهان",
+ "درنزدیکی",
+ "ساخت",
+ "خود",
+ "زمین",
+ "پدر",
+ "هر",
+ "جدید",
+ "کار",
+ "بخش",
+ "را",
+ "دریافت",
+ "محل",
+ "ساخته",
+ "زنده",
+ "کمی",
+ "تنها",
+ "دور",
+ "مرد",
+ "سال",
+ "آمد",
+ "نمایش",
+ "هر",
+ "خوب",
+ "را",
+ "ما",
+ "در",
+ "بسیار",
+ "فقط",
+ "فرم",
+ "حکم",
+ "بزرگ",
+ "میگویند",
+ "کمک",
+ "کم",
+ "خط",
+ "متفاوت",
+ "علت",
+ "بسیار",
+ "متوسط",
+ "قبل",
+ "حرکت",
+ "راست",
+ "پسر",
+ "قدیمی",
+ "هم",
+ "همان",
+ "او",
+ "همه",
+ "وجوددارد",
+ "بالا",
+ "استفاده",
+ "راه",
+ "درمورد",
+ "نوشتن",
+ "را",
+ "مانند",
+ "تا",
+ "اینها",
+ "او",
+ "طولانی",
+ "را",
+ "ببینید",
+ "او",
+ "دو",
+ "دارد",
+ "نگاه",
+ "تر",
+ "روز",
+ "میتواند",
+ "به",
+ "آمده",
+ "انجام",
+ "تعداد",
+ "صدا",
+ "هیچ",
+ "بیشترین",
+ "مردم",
+ "من",
+ "روی",
+ "میدانم",
+ "اب",
+ "تماس",
+ "اولین",
+ "که",
+ "پایین",
+ "سمت",
+ "بوده",
+ "ساعت",
+ "سر",
+ "ایستادن",
+ "خود",
+ "صفحه",
+ "باید",
+ "کشور",
+ "یافت",
+ "پاسخ",
+ "مدرسه",
+ "رشد",
+ "مطالعه",
+ "هنوز",
+ "یادگیری",
+ "کارخانه",
+ "پوشش",
+ "آفتاب",
+ "چهار",
+ "بین",
+ "دولت",
+ "چشم",
+ "هرگز",
+ "آخرین",
+ "اجازه",
+ "فکر",
+ "شهرستان",
+ "درخت",
+ "صلیب",
+ "مزرعه",
+ "سخت",
+ "شروع",
+ "زور",
+ "داستان",
+ "اره",
+ "بسیار",
+ "دریا",
+ "اواخر",
+ "اجرا",
+ "نکن",
+ "مطبوعات",
+ "نزدیک",
+ "شب",
+ "واقعی",
+ "زندگی",
+ "کم",
+ "شمال",
+ "کتاب",
+ "حمل",
+ "علم",
+ "خوردن",
+ "اتاق",
+ "دوستان",
+ "ایده",
+ "ماهی",
+ "کوه",
+ "توقف",
+ "پایه",
+ "گوش",
+ "اسب",
+ "برش",
+ "مطمئن",
+ "تماشای",
+ "رنگ",
+ "صورت",
+ "چوب",
+ "اصلی",
+ "باز",
+ "باهم",
+ "بعدی",
+ "سفید",
+ "کودکان",
+ "شروع",
+ "رو",
+ "مثال",
+ "آسان",
+ "مقاله",
+ "گروه",
+ "همیشه",
+ "موسیقی",
+ "آن",
+ "هردو",
+ "علامت",
+ "غالبا",
+ "نامه",
+ "مایل",
+ "رودخانه",
+ "اتومبیل",
+ "پا",
+ "مراقبت",
+ "دوم",
+ "کافی",
+ "ساده",
+ "دختر",
+ "معمول",
+ "جوان",
+ "اماده",
+ "بالا",
+ "همیشه",
+ "قرمز",
+ "لیست",
+ "هرچند",
+ "احساس",
+ "بحث",
+ "پرنده",
+ "بزودی",
+ "بدن",
+ "سگ",
+ "خانواده",
+ "مستقیم",
+ "مطرح",
+ "ترک",
+ "آهنگ",
+ "درب",
+ "محصول",
+ "کوتاه",
+ "کلاس",
+ "باد",
+ "سوال",
+ "کامل",
+ "کشتی",
+ "منطقه",
+ "نیم",
+ "سنگ",
+ "منظور",
+ "آتش",
+ "جنوب",
+ "مشکل",
+ "قطعه",
+ "گفت",
+ "عبور",
+ "بالا",
+ "تمام",
+ "پادشاه",
+ "خیابان",
+ "اینچ",
+ "ضرب",
+ "هیچ",
+ "البته",
+ "اقامت",
+ "چرخ",
+ "کامل",
+ "نیروی",
+ "آبی",
+ "شی",
+ "سطح",
+ "عمیق",
+ "ماه",
+ "جزیره",
+ "پا",
+ "سیستم",
+ "مشغول",
+ "آزمون",
+ "رکورد",
+ "قایق",
+ "مشترک",
+ "طلا",
+ "ممکن",
+ "هواپیما",
+ "جا",
+ "خشک",
+ "خنده",
+ "هزار",
+ "پیش",
+ "فرار",
+ "بررسی",
+ "بازی",
+ "شکل",
+ "برابر",
+ "داغ",
+ "دست",
+ "آورده",
+ "حرارت",
+ "برف",
+ "لاستیک",
+ "را",
+ "بله",
+ "دور",
+ "پر",
+ "شرق",
+ "رنگ",
+ "زبان",
+ "درمیان",
+ "واحد",
+ "قدرت",
+ "شهر",
+ "خوب",
+ "معین",
+ "پرواز",
+ "سقوط",
+ "شود",
+ "فریاد",
+ "تاریک",
+ "ماشین",
+ "یادداشت",
+ "صبر",
+ "برنامه",
+ "شکل",
+ "ستاره",
+ "جعبه",
+ "اسم",
+ "حوزه",
+ "بقیه",
+ "درست",
+ "قادر",
+ "پوند",
+ "انجام",
+ "زیبایی",
+ "درایو",
+ "شامل",
+ "جلو",
+ "آموزش",
+ "هفته",
+ "نهایی",
+ "به",
+ "سبز",
+ "آه",
+ "سریع",
+ "توسعه",
+ "اقیانوس",
+ "گرم",
+ "رایگان",
+ "دقیقه",
+ "قوی",
+ "ویژه",
+ "ذهن",
+ "روشن",
+ "دم",
+ "محصول",
+ "واقع",
+ "فضا",
+ "شنیده",
+ "بهترین",
+ "ساعت",
+ "بهتر",
+ "در",
+ "صد",
+ "پنج",
+ "گام",
+ "اوایل",
+ "غرب",
+ "زمین",
+ "علاقه",
+ "سریع",
+ "فعل",
+ "شش",
+ "جدول",
+ "سفر",
+ "کمتر",
+ "صبح",
+ "ده",
+ "ساده",
+ "چند",
+ "واکه",
+ "جنگ",
+ "دربرابر",
+ "الگوی",
+ "کند",
+ "مرکز",
+ "فرد",
+ "پول",
+ "خدمت",
+ "جاده",
+ "نقشه",
+ "باران",
+ "قانون",
+ "حکومت",
+ "کشیدن",
+ "سرد",
+ "اطلاع",
+ "صدای",
+ "انرژی",
+ "شکار",
+ "احتمالی",
+ "تخت",
+ "برادر",
+ "سوار",
+ "سلول",
+ "باور",
+ "شاید",
+ "ناگهانی",
+ "شمار",
+ "مربع",
+ "دلیل",
+ "طول",
+ "نمایندگی",
+ "هنر",
+ "موضوع",
+ "منطقه",
+ "اندازه",
+ "کنند",
+ "وزن",
+ "عمومی",
+ "یخ",
+ "موضوع",
+ "دایره",
+ "جفت",
+ "تقسیم",
+ "هجاز",
+ "نمد",
+ "بزرگ",
+ "توپ",
+ "هنوز",
+ "موج",
+ "قلب",
+ "ساعت",
+ "حاضر",
+ "سنگین",
+ "رقص",
+ "موتور",
+ "موقعیت",
+ "دست",
+ "گسترده",
+ "بادبان",
+ "ماده",
+ "بخش",
+ "جنگل",
+ "نشستن",
+ "مسابقه",
+ "پنجره",
+ "فروشگاه",
+ "تابستان",
+ "قطار",
+ "خواب",
+ "ثابت",
+ "تنها",
+ "پا",
+ "ورزش",
+ "دیوار",
+ "گرفتن",
+ "کوه",
+ "آرزو",
+ "آسمان",
+ "لذت",
+ "زمستان",
+ "شنبه",
+ "وحشی",
+ "ابزار",
+ "شیشهای",
+ "چمن",
+ "گاو",
+ "کار",
+ "لبه",
+ "علامت",
+ "بازدید",
+ "گذشته",
+ "نرم",
+ "سرگرم",
+ "روشن",
+ "گاز",
+ "ماه",
+ "میلیون",
+ "تحمل",
+ "پایان",
+ "شاد",
+ "امیدوارم",
+ "گل",
+ "پوشاندن",
+ "رفته",
+ "تجارت",
+ "ملودی",
+ "سفر",
+ "دفتر",
+ "دریافت",
+ "ردیف",
+ "دهان",
+ "دقیق",
+ "نماد",
+ "مرگ",
+ "کمترین",
+ "مشکل",
+ "فریاد",
+ "جز",
+ "نوشت",
+ "دانه",
+ "تن",
+ "عضویت",
+ "تمیز",
+ "استراحت",
+ "خانم",
+ "حیاط",
+ "افزایش",
+ "بد",
+ "ضربه",
+ "نفت",
+ "خون",
+ "رشد",
+ "درصد",
+ "مخلوط",
+ "تیم",
+ "سیم",
+ "هزینه",
+ "قهوهای",
+ "لباس",
+ "باغ",
+ "برابر",
+ "ارسال",
+ "کنید",
+ "سقوط",
+ "مناسب",
+ "جریان",
+ "عادلانه",
+ "بانک",
+ "ذخیره",
+ "کنترل",
+ "اعشاری",
+ "گوش",
+ "دیگر",
+ "کاملا",
+ "شکست",
+ "مورد",
+ "متوسط",
+ "کشتن",
+ "پسر",
+ "دریاچه",
+ "لحظهای",
+ "مقیاس",
+ "باصدا",
+ "بهار",
+ "مشاهده",
+ "کودک",
+ "مستقیم",
+ "همخوان",
+ "کشور",
+ "شیر",
+ "سرعت",
+ "روش",
+ "عضو",
+ "پرداخت",
+ "سن",
+ "بخش",
+ "لباس",
+ "ابر",
+ "تعجب",
+ "آرام",
+ "سنگ",
+ "کوچک",
+ "صعود",
+ "سرد",
+ "طراحی",
+ "ضعیف",
+ "زیادی",
+ "تجربه",
+ "پایین",
+ "کلید",
+ "اهن",
+ "تک",
+ "چوب",
+ "تخت",
+ "بیست",
+ "پوست",
+ "لبخند",
+ "چینی",
+ "سوراخ",
+ "کودک",
+ "هشت",
+ "روستای",
+ "ملاقات",
+ "ریشه",
+ "خرید",
+ "بالابردن",
+ "حل",
+ "فلز",
+ "چه",
+ "فشار",
+ "هفت",
+ "بند",
+ "سوم",
+ "باید",
+ "مو",
+ "توصیف",
+ "آشپز",
+ "طبقه",
+ "یا",
+ "نتیجه",
+ "رایت",
+ "تپه",
+ "امن",
+ "گربه",
+ "قرن",
+ "درنظر",
+ "نوع",
+ "قانون",
+ "بیت",
+ "ساحل",
+ "کپی",
+ "عبارت",
+ "خاموش",
+ "بلند",
+ "شن",
+ "خاک",
+ "رول",
+ "انگشت",
+ "صنعت",
+ "ارزش",
+ "مبارزه",
+ "دروغ",
+ "تحریک",
+ "طبیعی",
+ "نظر",
+ "احساس",
+ "سرمایه",
+ "نه",
+ "صندلی",
+ "خطر",
+ "میوه",
+ "غنی",
+ "ضخامت",
+ "سرباز",
+ "روند",
+ "کار",
+ "عمل",
+ "جداگانه",
+ "دشوار",
+ "دکتر",
+ "لطفا",
+ "محافظت",
+ "ظهر",
+ "محصول",
+ "مدرن",
+ "عنصر",
+ "ضربه",
+ "گوشه",
+ "حزب",
+ "عرضه",
+ "که",
+ "قرار",
+ "حلقه",
+ "شخصیت",
+ "حشرات",
+ "گرفتار",
+ "دوره",
+ "رادیو",
+ "صحبت",
+ "اتم",
+ "انسانی",
+ "تاریخ",
+ "اثر",
+ "برق",
+ "انتظار",
+ "استخوان",
+ "نرده",
+ "ارائه",
+ "توافق",
+ "بنابراین",
+ "ملایم",
+ "زن",
+ "کاپیتان",
+ "لازم",
+ "تیز",
+ "بال",
+ "ایجاد",
+ "همسایه",
+ "شستشو",
+ "خفاش",
+ "نه",
+ "جمعیت",
+ "ذرت",
+ "مقایسه",
+ "شعر",
+ "رشته",
+ "زنگ",
+ "گوشت",
+ "مالیدن",
+ "لوله",
+ "معروف",
+ "دلار",
+ "جریان",
+ "ترس",
+ "نظر",
+ "نازک",
+ "مثلث",
+ "سیاره",
+ "عجلهای",
+ "رئیس",
+ "مستعمره",
+ "ساعت",
+ "معدن",
+ "کراوات",
+ "اصلی",
+ "تازه",
+ "جستجو",
+ "ارسال",
+ "زرد",
+ "اسلحه",
+ "اجازه",
+ "چاپ",
+ "مرده",
+ "نقطه",
+ "بیابان",
+ "جریان",
+ "آسانسور",
+ "افزایش",
+ "رسیدن",
+ "کارشناس",
+ "آهنگ",
+ "ساحل",
+ "بخش",
+ "ورق",
+ "ماده",
+ "اتصال",
+ "پست",
+ "وتر",
+ "چربی",
+ "خوشحالم",
+ "اصلی",
+ "سهم",
+ "ایستگاه",
+ "پدر",
+ "نان",
+ "شارژ",
+ "مناسب",
+ "بار",
+ "پیشنهاد",
+ "بخش",
+ "برده",
+ "اردک",
+ "فوری",
+ "بازار",
+ "درجه",
+ "جمعیت",
+ "جوجه",
+ "عزیز",
+ "دشمن",
+ "پاسخ",
+ "نوشابه",
+ "پشتیبانی",
+ "سخنرانی",
+ "طبیعت",
+ "دامنه",
+ "بخار",
+ "حرکت",
+ "راه",
+ "مایع",
+ "دندانها",
+ "پوسته",
+ "گردن",
+ "اکسیژن",
+ "قند",
+ "مرگ",
+ "خوب",
+ "مهارت",
+ "زنان",
+ "فصل",
+ "مغناطیس",
+ "نقرهای",
+ "تشکر",
+ "شاخه",
+ "مسابقه",
+ "پسوند",
+ "ویژه",
+ "انجیر",
+ "ترس",
+ "بزرگ",
+ "خواهر",
+ "فولاد",
+ "بحث",
+ "مشابه",
+ "راهنمایی",
+ "تجربه",
+ "نمره",
+ "سیب",
+ "خریداری",
+ "رهبری",
+ "زمین",
+ "کت",
+ "جرم",
+ "کارت",
+ "گروه",
+ "طناب",
+ "لغزش",
+ "برنده",
+ "رویا",
+ "شب",
+ "شرایط",
+ "خوراک",
+ "ابزار",
+ "کل",
+ "اساسی",
+ "بوی",
+ "دره",
+ "دو",
+ "صندلی",
+ "ادامه",
+ "بلوک",
+ "نمودار",
+ "کلاه",
+ "فروش",
+ "موفقیت",
+ "شرکت",
+ "تفریق",
+ "رویداد",
+ "خاص",
+ "معامله",
+ "شنا",
+ "مدت",
+ "همسر",
+ "کفش",
+ "شانه",
+ "گسترش",
+ "ترتیب",
+ "اردوگاه",
+ "اختراع",
+ "پنبه",
+ "متولد",
+ "تعیین",
+ "کوارت",
+ "نه",
+ "کامیون",
+ "سطح",
+ "شانس",
+ "فروشگاه",
+ "کشش",
+ "پرتاب",
+ "درخشش",
+ "خاصیت",
+ "ستون",
+ "مولکول",
+ "اشتباه",
+ "خاکستری",
+ "تکرار",
+ "نیاز",
+ "پهن",
+ "آماده",
+ "نمک",
+ "بینی",
+ "جمع",
+ "خشم",
+ "ادعا",
+ "قاره",
+ )
| diff --git a/tests/providers/test_lorem.py b/tests/providers/test_lorem.py
index 22e3d61533..2e7e8ccd6e 100644
--- a/tests/providers/test_lorem.py
+++ b/tests/providers/test_lorem.py
@@ -7,6 +7,7 @@
from faker.providers.lorem.cs_CZ import Provider as CsCzLoremProvider
from faker.providers.lorem.de_AT import Provider as DeAtLoremProvider
from faker.providers.lorem.de_DE import Provider as DeDeLoremProvider
+from faker.providers.lorem.fa_IR import Provider as FaIrLoremProvider
class TestLoremProvider:
@@ -318,6 +319,75 @@ def test_words(self, faker, num_samples):
assert all(isinstance(word, str) and word in AzAzLoremProvider.word_list for word in words)
+class TestFaIr:
+ """Test fa_IR lorem provider"""
+
+ word_list = [word.lower() for word in FaIrLoremProvider.word_list]
+
+ def test_paragraph(self, faker, num_samples):
+ num_sentences = 10
+ for _ in range(num_samples):
+ paragraph = faker.paragraph(nb_sentences=num_sentences)
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_paragraphs(self, faker, num_samples):
+ num_paragraphs = 5
+ for _ in range(num_samples):
+ paragraphs = faker.paragraphs(nb=num_paragraphs)
+ for paragraph in paragraphs:
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentence(self, faker, num_samples):
+ num_words = 10
+ for _ in range(num_samples):
+ sentence = faker.sentence(nb_words=num_words)
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentences(self, faker, num_samples):
+ num_sentences = 5
+ for _ in range(num_samples):
+ sentences = faker.sentences(nb=num_sentences)
+ for sentence in sentences:
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_text(self, faker, num_samples):
+ num_chars = 25
+ for _ in range(num_samples):
+ text = faker.text(max_nb_chars=num_chars)
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_texts(self, faker, num_samples):
+ num_texts = 5
+ num_chars = 25
+ for _ in range(num_samples):
+ texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
+ for text in texts:
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_word(self, faker, num_samples):
+ for _ in range(num_samples):
+ word = faker.word()
+ assert isinstance(word, str) and word in FaIrLoremProvider.word_list
+
+ def test_words(self, faker, num_samples):
+ num_words = 5
+ for _ in range(num_samples):
+ words = faker.words(num_words)
+ assert all(isinstance(word, str) and word in FaIrLoremProvider.word_list for word in words)
+
+
class TestBnBd:
"""Test bn_BD lorem provider"""
| [
{
"components": [
{
"doc": "Implement lorem provider for ``fa_IR`` locale.\n\nWord list is based on the source(s) below, and some words have been removed\nto make the word list appropriate for public testing.\n\nSources:\n\n- https://1000mostcommonwords.com/1000-most-common-persian-words/",
... | [
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_defaults",
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_custom_list",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_zero_nb",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_defaults"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
added lorem provider for fa-ir language code
added Lorem provider for the Persian language
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/lorem/fa_IR/__init__.py]
(definition of Provider:)
class Provider(LoremProvider):
"""Implement lorem provider for ``fa_IR`` locale.
Word list is based on the source(s) below, and some words have been removed
to make the word list appropriate for public testing.
Sources:
- https://1000mostcommonwords.com/1000-most-common-persian-words/"""
[end of new definitions in faker/providers/lorem/fa_IR/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
Textualize__rich-1992 | 1,992 | Textualize/rich | null | 21432b4c6ada8886f64ad20fb6a426f6a7efcdcf | 2022-02-22T12:03:08Z | diff --git a/CHANGELOG.md b/CHANGELOG.md
index a93d063911..f11ad82e92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,8 +9,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
+- Added options to TimeRemainingColumn to render a compact time format and render elapsed time when a task is
+ finished. https://github.com/Textualize/rich/pull/1992
- Added ProgressColumn `MofNCompleteColumn` to display raw `completed/total` column (similar to DownloadColumn,
- but displays values as ints, does not convert to floats or add bit/bytes units).
+ but displays values as ints, does not convert to floats or add bit/bytes units).
https://github.com/Textualize/rich/pull/1941
### Fixed
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index c58f4703b4..9baddef6c5 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -31,4 +31,4 @@ The following people have contributed to the development of Rich:
- [Dennis Brakhane](https://github.com/brakhane)
- [Michał Górny](https://github.com/mgorny)
- [Arian Mollik Wasi](https://github.com/wasi-master)
-
+- [Brian Rutledge](https://github.com/bhrutledge)
diff --git a/rich/progress.py b/rich/progress.py
index bbbdf70ef7..a80a20d2e0 100644
--- a/rich/progress.py
+++ b/rich/progress.py
@@ -343,18 +343,48 @@ def render(self, task: "Task") -> Text:
class TimeRemainingColumn(ProgressColumn):
- """Renders estimated time remaining."""
+ """Renders estimated time remaining.
+
+ Args:
+ compact (bool, optional): Render MM:SS when time remaining is less than an hour. Defaults to False.
+ elapsed_when_finished (bool, optional): Render time elapsed when the task is finished. Defaults to False.
+ """
# Only refresh twice a second to prevent jitter
max_refresh = 0.5
+ def __init__(
+ self,
+ compact: bool = False,
+ elapsed_when_finished: bool = False,
+ table_column: Optional[Column] = None,
+ ):
+ self.compact = compact
+ self.elapsed_when_finished = elapsed_when_finished
+ super().__init__(table_column=table_column)
+
def render(self, task: "Task") -> Text:
"""Show time remaining."""
- remaining = task.time_remaining
- if remaining is None:
- return Text("-:--:--", style="progress.remaining")
- remaining_delta = timedelta(seconds=int(remaining))
- return Text(str(remaining_delta), style="progress.remaining")
+ if self.elapsed_when_finished and task.finished:
+ task_time = task.finished_time
+ style = "progress.elapsed"
+ else:
+ task_time = task.time_remaining
+ style = "progress.remaining"
+
+ if task_time is None:
+ return Text("--:--" if self.compact else "-:--:--", style=style)
+
+ # Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py
+ minutes, seconds = divmod(int(task_time), 60)
+ hours, minutes = divmod(minutes, 60)
+
+ if self.compact and not hours:
+ formatted = f"{minutes:02d}:{seconds:02d}"
+ else:
+ formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}"
+
+ return Text(formatted, style=style)
class FileSizeColumn(ProgressColumn):
| diff --git a/tests/test_progress.py b/tests/test_progress.py
index 2dd53ccd2c..8bbf29b648 100644
--- a/tests/test_progress.py
+++ b/tests/test_progress.py
@@ -2,6 +2,7 @@
import io
from time import sleep
+from types import SimpleNamespace
import pytest
@@ -89,6 +90,33 @@ class FakeTask(Task):
assert str(text) == "0:01:00"
+@pytest.mark.parametrize(
+ "task_time, formatted",
+ [
+ (None, "--:--"),
+ (0, "00:00"),
+ (59, "00:59"),
+ (71, "01:11"),
+ (4210, "1:10:10"),
+ ],
+)
+def test_compact_time_remaining_column(task_time, formatted):
+ task = SimpleNamespace(finished=False, time_remaining=task_time)
+ column = TimeRemainingColumn(compact=True)
+
+ assert str(column.render(task)) == formatted
+
+
+def test_time_remaining_column_elapsed_when_finished():
+ task_time = 71
+ formatted = "0:01:11"
+
+ task = SimpleNamespace(finished=True, finished_time=task_time)
+ column = TimeRemainingColumn(elapsed_when_finished=True)
+
+ assert str(column.render(task)) == formatted
+
+
def test_renderable_column():
column = RenderableColumn("foo")
task = Task(1, "test", 100, 20, _get_time=lambda: 1.0)
| diff --git a/CHANGELOG.md b/CHANGELOG.md
index a93d063911..f11ad82e92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,8 +9,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
+- Added options to TimeRemainingColumn to render a compact time format and render elapsed time when a task is
+ finished. https://github.com/Textualize/rich/pull/1992
- Added ProgressColumn `MofNCompleteColumn` to display raw `completed/total` column (similar to DownloadColumn,
- but displays values as ints, does not convert to floats or add bit/bytes units).
+ but displays values as ints, does not convert to floats or add bit/bytes units).
https://github.com/Textualize/rich/pull/1941
### Fixed
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index c58f4703b4..9baddef6c5 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -31,4 +31,4 @@ The following people have contributed to the development of Rich:
- [Dennis Brakhane](https://github.com/brakhane)
- [Michał Górny](https://github.com/mgorny)
- [Arian Mollik Wasi](https://github.com/wasi-master)
-
+- [Brian Rutledge](https://github.com/bhrutledge)
| [
{
"components": [
{
"doc": "",
"lines": [
356,
364
],
"name": "TimeRemainingColumn.__init__",
"signature": "def __init__( self, compact: bool = False, elapsed_when_finished: bool = False, table_column: Optional[Column] = None, ):",
"type"... | [
"tests/test_progress.py::test_compact_time_remaining_column[None---:--]",
"tests/test_progress.py::test_compact_time_remaining_column[0-00:00]",
"tests/test_progress.py::test_compact_time_remaining_column[59-00:59]",
"tests/test_progress.py::test_compact_time_remaining_column[71-01:11]",
"tests/test_progres... | [
"tests/test_progress.py::test_bar_columns",
"tests/test_progress.py::test_text_column",
"tests/test_progress.py::test_time_elapsed_column",
"tests/test_progress.py::test_time_remaining_column",
"tests/test_progress.py::test_renderable_column",
"tests/test_progress.py::test_spinner_column",
"tests/test_p... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add condensed time column
## Type of changes
- [ ] Bug fix
- [x] New feature
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [x] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
While [replacing Twine's progress bar with Rich](https://github.com/pypa/twine/pull/877), I got into some bike-shedding/yak-shaving to declutter the time column. After looking at several download progress indicators (e.g. Chrome, Firefox, `wget`, `scp`), it seems more common to show time remaining than time elapsed. Both `wget` and `scp` show time remaining until the download is finished, then switch to time elapsed. So, I implemented that behavior here.
For example:
https://user-images.githubusercontent.com/1326704/155128465-783d6866-006b-4e9c-bcae-45267b787e24.mp4
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/progress.py]
(definition of TimeRemainingColumn.__init__:)
def __init__( self, compact: bool = False, elapsed_when_finished: bool = False, table_column: Optional[Column] = None, ):
[end of new definitions in rich/progress.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | |
sympy__sympy-23135 | 23,135 | sympy/sympy | 1.11 | dfef951e777dba36ad75162c8dc9402b228d11ed | 2022-02-20T23:13:06Z | diff --git a/sympy/tensor/array/arrayop.py b/sympy/tensor/array/arrayop.py
index c82b0d8e86b1..d85a68d462ee 100644
--- a/sympy/tensor/array/arrayop.py
+++ b/sympy/tensor/array/arrayop.py
@@ -331,7 +331,7 @@ def derive_by_array(expr, dx):
return diff(expr, dx)
-def permutedims(expr, perm):
+def permutedims(expr, perm=None, index_order_old=None, index_order_new=None):
"""
Permutes the indices of an array.
@@ -363,6 +363,15 @@ def permutedims(expr, perm):
>>> permutedims(b, (1, 2, 0))
[[[1, 5], [2, 6]], [[3, 7], [4, 8]]]
+ An alternative way to specify the same permutations as in the previous
+ lines involves passing the *old* and *new* indices, either as a list or as
+ a string:
+
+ >>> permutedims(b, index_order_old="cba", index_order_new="abc")
+ [[[1, 5], [3, 7]], [[2, 6], [4, 8]]]
+ >>> permutedims(b, index_order_old="cab", index_order_new="abc")
+ [[[1, 5], [2, 6]], [[3, 7], [4, 8]]]
+
``Permutation`` objects are also allowed:
>>> from sympy.combinatorics import Permutation
@@ -376,6 +385,9 @@ def permutedims(expr, perm):
from sympy.tensor.array.expressions.array_expressions import _CodegenArrayAbstract
from sympy.tensor.array.expressions.array_expressions import _permute_dims
from sympy.matrices.expressions.matexpr import MatrixSymbol
+ from sympy.tensor.array.expressions import PermuteDims
+ from sympy.tensor.array.expressions.array_expressions import get_rank
+ perm = PermuteDims._get_permutation_from_arguments(perm, index_order_old, index_order_new, get_rank(expr))
if isinstance(expr, (_ArrayExpr, _CodegenArrayAbstract, MatrixSymbol)):
return _permute_dims(expr, perm)
diff --git a/sympy/tensor/array/expressions/array_expressions.py b/sympy/tensor/array/expressions/array_expressions.py
index 5af09a134b46..2ed9164800ba 100644
--- a/sympy/tensor/array/expressions/array_expressions.py
+++ b/sympy/tensor/array/expressions/array_expressions.py
@@ -379,7 +379,9 @@ def _flatten_args(cls, args):
return new_args
def as_explicit(self):
- return reduce(operator.add, [arg.as_explicit() for arg in self.args])
+ return reduce(
+ operator.add,
+ [arg.as_explicit() if hasattr(arg, "as_explicit") else arg for arg in self.args])
class PermuteDims(_CodegenArrayAbstract):
@@ -410,6 +412,16 @@ class PermuteDims(_CodegenArrayAbstract):
>>> cg.shape
(2, 3)
+ There are optional parameters that can be used as alternative to the permutation:
+
+ >>> from sympy.tensor.array.expressions import ArraySymbol, PermuteDims
+ >>> M = ArraySymbol("M", (1, 2, 3, 4, 5))
+ >>> expr = PermuteDims(M, index_order_old="ijklm", index_order_new="kijml")
+ >>> expr
+ PermuteDims(M, (0 2 1)(3 4))
+ >>> expr.shape
+ (3, 1, 2, 5, 4)
+
Permutations of tensor products are simplified in order to achieve a
standard form:
@@ -446,12 +458,13 @@ class PermuteDims(_CodegenArrayAbstract):
[1, 0, 3, 2]
"""
- def __new__(cls, expr, permutation, **kwargs):
+ def __new__(cls, expr, permutation=None, index_order_old=None, index_order_new=None, **kwargs):
from sympy.combinatorics import Permutation
expr = _sympify(expr)
+ expr_rank = get_rank(expr)
+ permutation = cls._get_permutation_from_arguments(permutation, index_order_old, index_order_new, expr_rank)
permutation = Permutation(permutation)
permutation_size = permutation.size
- expr_rank = get_rank(expr)
if permutation_size != expr_rank:
raise ValueError("Permutation size must be the length of the shape of expr")
@@ -691,7 +704,34 @@ def _nest_permutation(cls, expr, permutation):
return None
def as_explicit(self):
- return permutedims(self.expr.as_explicit(), self.permutation)
+ expr = self.expr
+ if hasattr(expr, "as_explicit"):
+ expr = expr.as_explicit()
+ return permutedims(expr, self.permutation)
+
+ @classmethod
+ def _get_permutation_from_arguments(cls, permutation, index_order_old, index_order_new, dim):
+ if permutation is None:
+ if index_order_new is None or index_order_old is None:
+ raise ValueError("Permutation not defined")
+ return PermuteDims._get_permutation_from_index_orders(index_order_old, index_order_new, dim)
+ else:
+ if index_order_new is not None:
+ raise ValueError("index_order_new cannot be defined with permutation")
+ if index_order_old is not None:
+ raise ValueError("index_order_old cannot be defined with permutation")
+ return permutation
+
+ @classmethod
+ def _get_permutation_from_index_orders(cls, index_order_old, index_order_new, dim):
+ if len(set(index_order_new)) != dim:
+ raise ValueError("wrong number of indices in index_order_new")
+ if len(set(index_order_old)) != dim:
+ raise ValueError("wrong number of indices in index_order_old")
+ if len(set.symmetric_difference(set(index_order_new), set(index_order_old))) > 0:
+ raise ValueError("index_order_new and index_order_old must have the same indices")
+ permutation = [index_order_old.index(i) for i in index_order_new]
+ return permutation
class ArrayDiagonal(_CodegenArrayAbstract):
@@ -903,7 +943,10 @@ def _get_positions_shape(cls, shape, diagonal_indices):
return positions, shape
def as_explicit(self):
- return tensordiagonal(self.expr.as_explicit(), *self.diagonal_indices)
+ expr = self.expr
+ if hasattr(expr, "as_explicit"):
+ expr = expr.as_explicit()
+ return tensordiagonal(expr, *self.diagonal_indices)
class ArrayElementwiseApplyFunc(_CodegenArrayAbstract):
@@ -940,6 +983,12 @@ def _get_function_fdiff(self):
fdiff = Lambda(d, fdiff)
return fdiff
+ def as_explicit(self):
+ expr = self.expr
+ if hasattr(expr, "as_explicit"):
+ expr = expr.as_explicit()
+ return expr.applyfunc(self.function)
+
class ArrayContraction(_CodegenArrayAbstract):
r"""
@@ -1481,7 +1530,10 @@ def _get_contraction_links(self):
return dlinks
def as_explicit(self):
- return tensorcontraction(self.expr.as_explicit(), *self.contraction_indices)
+ expr = self.expr
+ if hasattr(expr, "as_explicit"):
+ expr = expr.as_explicit()
+ return tensorcontraction(expr, *self.contraction_indices)
class Reshape(_CodegenArrayAbstract):
@@ -1536,7 +1588,9 @@ def doit(self, *args, **kwargs):
return Reshape(expr, self.shape)
def as_explicit(self):
- ee = self.expr.as_explicit()
+ ee = self.expr
+ if hasattr(ee, "as_explicit"):
+ ee = ee.as_explicit()
if isinstance(ee, MatrixCommon):
from sympy import Array
ee = Array(ee)
| diff --git a/sympy/tensor/array/expressions/tests/test_array_expressions.py b/sympy/tensor/array/expressions/tests/test_array_expressions.py
index ee7ea4051658..63fb79ab7ced 100644
--- a/sympy/tensor/array/expressions/tests/test_array_expressions.py
+++ b/sympy/tensor/array/expressions/tests/test_array_expressions.py
@@ -736,6 +736,14 @@ def test_array_expr_construction_with_functions():
expr = permutedims(PermuteDims(tp, [1, 0, 2, 3]), [0, 1, 3, 2])
assert expr == PermuteDims(tp, [1, 0, 3, 2])
+ expr = PermuteDims(tp, index_order_new=["a", "b", "c", "d"], index_order_old=["d", "c", "b", "a"])
+ assert expr == PermuteDims(tp, [3, 2, 1, 0])
+
+ arr = Array(range(32)).reshape(2, 2, 2, 2, 2)
+ expr = PermuteDims(arr, index_order_new=["a", "b", "c", "d", "e"], index_order_old=['b', 'e', 'a', 'd', 'c'])
+ assert expr == PermuteDims(arr, [2, 0, 4, 3, 1])
+ assert expr.as_explicit() == permutedims(arr, index_order_new=["a", "b", "c", "d", "e"], index_order_old=['b', 'e', 'a', 'd', 'c'])
+
def test_array_element_expressions():
# Check commutative property:
@@ -784,3 +792,17 @@ def test_array_expr_reshape():
assert expr.expr == C
assert expr.shape == (2, 2)
assert expr.doit() == Array([[1, 2], [3, 4]])
+
+
+def test_array_expr_as_explicit_with_explicit_component_arrays():
+ # Test if .as_explicit() works with explicit-component arrays
+ # nested in array expressions:
+ from sympy.abc import x, y, z, t
+ A = Array([[x, y], [z, t]])
+ assert ArrayTensorProduct(A, A).as_explicit() == tensorproduct(A, A)
+ assert ArrayDiagonal(A, (0, 1)).as_explicit() == tensordiagonal(A, (0, 1))
+ assert ArrayContraction(A, (0, 1)).as_explicit() == tensorcontraction(A, (0, 1))
+ assert ArrayAdd(A, A).as_explicit() == A + A
+ assert ArrayElementwiseApplyFunc(sin, A).as_explicit() == A.applyfunc(sin)
+ assert PermuteDims(A, [1, 0]).as_explicit() == permutedims(A, [1, 0])
+ assert Reshape(A, [4]).as_explicit() == A.reshape(4)
diff --git a/sympy/tensor/array/tests/test_arrayop.py b/sympy/tensor/array/tests/test_arrayop.py
index 4538123f62af..d216102c27d0 100644
--- a/sympy/tensor/array/tests/test_arrayop.py
+++ b/sympy/tensor/array/tests/test_arrayop.py
@@ -1,3 +1,4 @@
+import itertools
import random
from sympy.combinatorics import Permutation
@@ -301,6 +302,25 @@ def test_array_permutedims():
assert B.transpose() == SparseArrayType({10000: 1, 1: 2}, (20000, 10000))
+def test_permutedims_with_indices():
+ A = Array(range(32)).reshape(2, 2, 2, 2, 2)
+ indices_new = list("abcde")
+ indices_old = list("ebdac")
+ new_A = permutedims(A, index_order_new=indices_new, index_order_old=indices_old)
+ for a, b, c, d, e in itertools.product(range(2), range(2), range(2), range(2), range(2)):
+ assert new_A[a, b, c, d, e] == A[e, b, d, a, c]
+ indices_old = list("cabed")
+ new_A = permutedims(A, index_order_new=indices_new, index_order_old=indices_old)
+ for a, b, c, d, e in itertools.product(range(2), range(2), range(2), range(2), range(2)):
+ assert new_A[a, b, c, d, e] == A[c, a, b, e, d]
+ raises(ValueError, lambda: permutedims(A, index_order_old=list("aacde"), index_order_new=list("abcde")))
+ raises(ValueError, lambda: permutedims(A, index_order_old=list("abcde"), index_order_new=list("abcce")))
+ raises(ValueError, lambda: permutedims(A, index_order_old=list("abcde"), index_order_new=list("abce")))
+ raises(ValueError, lambda: permutedims(A, index_order_old=list("abce"), index_order_new=list("abce")))
+ raises(ValueError, lambda: permutedims(A, [2, 1, 0, 3, 4], index_order_old=list("abcde")))
+ raises(ValueError, lambda: permutedims(A, [2, 1, 0, 3, 4], index_order_new=list("abcde")))
+
+
def test_flatten():
from sympy.matrices.dense import Matrix
for ArrayType in [ImmutableDenseNDimArray, ImmutableSparseNDimArray, Matrix]:
| [
{
"components": [
{
"doc": "",
"lines": [
713,
723
],
"name": "PermuteDims._get_permutation_from_arguments",
"signature": "def _get_permutation_from_arguments(cls, permutation, index_order_old, index_order_new, dim):",
"type": "function"
... | [
"test_array_expr_construction_with_functions",
"test_permutedims_with_indices"
] | [
"test_array_symbol_and_element",
"test_zero_array",
"test_one_array",
"test_arrayexpr_contraction_construction",
"test_arrayexpr_array_flatten",
"test_arrayexpr_array_diagonal",
"test_arrayexpr_array_shape",
"test_arrayexpr_permutedims_sink",
"test_arrayexpr_push_indices_up_and_down",
"test_arraye... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
PermuteDims: added easier API
PermuteDims and permutedims now have an easier API based on index orders. The corresponding permutation is then derived internally.
Now you don't need to know the permutation anymore, just the old index order and the new index order.
<!-- BEGIN RELEASE NOTES -->
NO ENTRY
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/tensor/array/expressions/array_expressions.py]
(definition of PermuteDims._get_permutation_from_arguments:)
def _get_permutation_from_arguments(cls, permutation, index_order_old, index_order_new, dim):
(definition of PermuteDims._get_permutation_from_index_orders:)
def _get_permutation_from_index_orders(cls, index_order_old, index_order_new, dim):
(definition of ArrayElementwiseApplyFunc.as_explicit:)
def as_explicit(self):
[end of new definitions in sympy/tensor/array/expressions/array_expressions.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | edf24253833ca153cb6d29ae54092ecebe29614c | ||
sympy__sympy-23111 | 23,111 | sympy/sympy | 1.11 | b9856408a5b9bf5757fc8b4a682565337f402a8f | 2022-02-19T09:31:08Z | diff --git a/sympy/physics/vector/vector.py b/sympy/physics/vector/vector.py
index 60e7db06ae7a..fefb2e7d169e 100644
--- a/sympy/physics/vector/vector.py
+++ b/sympy/physics/vector/vector.py
@@ -1,5 +1,5 @@
from sympy.core.backend import (S, sympify, expand, sqrt, Add, zeros, acos,
- ImmutableMatrix as Matrix, _simplify_matrix)
+ ImmutableMatrix as Matrix, _simplify_matrix)
from sympy.simplify.trigsimp import trigsimp
from sympy.printing.defaults import Printable
from sympy.utilities.misc import filldedent
@@ -751,20 +751,47 @@ def angle_between(self, vec):
return angle
def free_symbols(self, reference_frame):
- """
- Returns the free symbols in the measure numbers of the vector
+ """Returns the free symbols in the measure numbers of the vector
expressed in the given reference frame.
- Parameter
- =========
+ Parameters
+ ==========
+ reference_frame : ReferenceFrame
+ The frame with respect to which the free symbols of the given
+ vector is to be determined.
+
+ Returns
+ =======
+ set of Symbol
+ set of symbols present in the measure numbers of
+ ``reference_frame``.
+
+ """
+
+ return self.to_matrix(reference_frame).free_symbols
+ def free_dynamicsymbols(self, reference_frame):
+ """Returns the free dynamic symbols (functions of time ``t``) in the
+ measure numbers of the vector expressed in the given reference frame.
+
+ Parameters
+ ==========
reference_frame : ReferenceFrame
- The frame with respect to which the free symbols of the
+ The frame with respect to which the free dynamic symbols of the
given vector is to be determined.
+ Returns
+ =======
+ set
+ Set of functions of time ``t``, e.g.
+ ``Function('f')(me.dynamicsymbols._t)``.
+
"""
+ # TODO : Circular dependency if imported at top. Should move
+ # find_dynamicsymbols into physics.vector.functions.
+ from sympy.physics.mechanics.functions import find_dynamicsymbols
- return self.to_matrix(reference_frame).free_symbols
+ return find_dynamicsymbols(self, reference_frame=reference_frame)
def _eval_evalf(self, prec):
if not self.args:
| diff --git a/sympy/physics/vector/tests/test_vector.py b/sympy/physics/vector/tests/test_vector.py
index 713884345af9..7f5eec69aa5a 100644
--- a/sympy/physics/vector/tests/test_vector.py
+++ b/sympy/physics/vector/tests/test_vector.py
@@ -1,5 +1,6 @@
from sympy.core.numbers import (Float, pi)
from sympy.core.symbol import symbols
+from sympy.core.sorting import ordered
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.matrices.immutable import ImmutableDenseMatrix as Matrix
from sympy.physics.vector import ReferenceFrame, Vector, dynamicsymbols, dot
@@ -11,6 +12,21 @@
A = ReferenceFrame('A')
+def test_free_dynamicsymbols():
+ A, B, C, D = symbols('A, B, C, D', cls=ReferenceFrame)
+ a, b, c, d, e, f = dynamicsymbols('a, b, c, d, e, f')
+ B.orient_axis(A, a, A.x)
+ C.orient_axis(B, b, B.y)
+ D.orient_axis(C, c, C.x)
+
+ v = d*D.x + e*D.y + f*D.z
+
+ assert set(ordered(v.free_dynamicsymbols(A))) == {a, b, c, d, e, f}
+ assert set(ordered(v.free_dynamicsymbols(B))) == {b, c, d, e, f}
+ assert set(ordered(v.free_dynamicsymbols(C))) == {c, d, e, f}
+ assert set(ordered(v.free_dynamicsymbols(D))) == {d, e, f}
+
+
def test_Vector():
assert A.x != A.y
assert A.y != A.z
| [
{
"components": [
{
"doc": "Returns the free dynamic symbols (functions of time ``t``) in the\nmeasure numbers of the vector expressed in the given reference frame.\n\nParameters\n==========\nreference_frame : ReferenceFrame\n The frame with respect to which the free dynamic symbols of the\n ... | [
"test_free_dynamicsymbols"
] | [
"test_Vector",
"test_Vector_diffs",
"test_vector_var_in_dcm",
"test_vector_simplify",
"test_vector_evalf",
"test_vector_angle"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add .free_dynamicsymbols to physics vectors.
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
#### Brief description of what is fixed or changed
Adds a new method to physics vectors that extracts the functions of time. This complements `.free_symbols()`.
#### Other comments
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
* physics.vector
* Adds a new method to physics vectors that extracts the functions of time: `.free_dynamicsymbols()`.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/physics/vector/vector.py]
(definition of Vector.free_dynamicsymbols:)
def free_dynamicsymbols(self, reference_frame):
"""Returns the free dynamic symbols (functions of time ``t``) in the
measure numbers of the vector expressed in the given reference frame.
Parameters
==========
reference_frame : ReferenceFrame
The frame with respect to which the free dynamic symbols of the
given vector is to be determined.
Returns
=======
set
Set of functions of time ``t``, e.g.
``Function('f')(me.dynamicsymbols._t)``."""
[end of new definitions in sympy/physics/vector/vector.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | edf24253833ca153cb6d29ae54092ecebe29614c | ||
huggingface__accelerate-255 | 255 | huggingface/accelerate | null | 4fc586f5af650a5711dc907fb613367d2f009c9a | 2022-02-17T21:06:16Z | diff --git a/src/accelerate/accelerator.py b/src/accelerate/accelerator.py
index b841bc809f9..1254c19e9af 100644
--- a/src/accelerate/accelerator.py
+++ b/src/accelerate/accelerator.py
@@ -22,6 +22,7 @@
from packaging import version
+from .checkpointing import load_accelerator_state, save_accelerator_state
from .data_loader import prepare_data_loader
from .kwargs_handlers import DistributedDataParallelKwargs, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler
from .optimizer import AcceleratedOptimizer
@@ -40,6 +41,7 @@
if is_deepspeed_available():
import deepspeed
+
from .deepspeed_utils import DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper
import logging
@@ -560,6 +562,36 @@ def save(self, obj, f):
"""
save(obj, f)
+ def save_state(self, output_dir: str):
+ """
+ Saves the current states of the model, optimizer, scaler, and RNG generators.
+
+ Args:
+ output_dir (:obj:`str` or :obj:`os.PathLike`):
+ The name of the folder to save all relevant weights and states.
+ """
+ # Check if folder exists
+ output_dir = os.path.expanduser(output_dir)
+ os.makedirs(output_dir, exist_ok=True)
+ logger.info(f"Saving current state to {output_dir}")
+ weights = [self.get_state_dict(m) for m in self._models]
+ return save_accelerator_state(output_dir, weights, self._optimizers, self.state.process_index, self.scaler)
+
+ def load_state(self, input_dir: str):
+ """
+ Loads the current states of the model, optimizer, scaler, and RNG generators.
+
+ Args:
+ input_dir (:obj:`str` or :obj:`os.PathLike`):
+ The name of the folder all relevant weights and states were saved in.
+ """
+ # Check if folder exists
+ input_dir = os.path.expanduser(input_dir)
+ if not os.path.isdir(input_dir):
+ raise ValueError(f"Tried to find {input_dir} but folder does not exist")
+ logger.info(f"Loading states from {input_dir}")
+ load_accelerator_state(input_dir, self._models, self._optimizers, self.state.process_index, self.scaler)
+
def free_memory(self):
"""
Will release all references to the internal objects stored and call the garbage collector. You should call this
diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py
index 3af792f1f05..abd5152ae83 100644
--- a/src/accelerate/utils.py
+++ b/src/accelerate/utils.py
@@ -43,6 +43,11 @@ def is_sagemaker_available():
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
+SCALER_NAME = "scaler.pt"
+MODEL_NAME = "pytorch_model"
+RNG_STATE_NAME = "random_states"
+OPTIMIZER_NAME = "optimizer"
+
class RNGType(Enum):
TORCH = "torch"
| diff --git a/src/accelerate/checkpointing.py b/src/accelerate/checkpointing.py
new file mode 100644
index 00000000000..37d68b90c25
--- /dev/null
+++ b/src/accelerate/checkpointing.py
@@ -0,0 +1,134 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import random
+from typing import List
+
+import numpy as np
+import torch
+from torch.cuda.amp import GradScaler
+
+from .state import is_tpu_available
+from .utils import MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SCALER_NAME, save
+
+
+if is_tpu_available():
+ import torch_xla.core.xla_model as xm
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+def save_accelerator_state(
+ output_dir: str, model_states: List[dict], optimizers: list, process_index: int, scaler: GradScaler = None
+):
+ """
+ Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
+
+ Args:
+ output_dir (:obj:`str` or :obj:`os.PathLike`):
+ The name of the folder to save all relevant weights and states.
+ model_states (:obj:`List[torch.nn.Module]`):
+ A list of model states
+ optimizers (:obj:`List[torch.optim.Optimizer]`):
+ A list of optimizer instances
+ process_index (:obj:`int`):
+ The current process index in the Accelerator state
+ scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):
+ An optional gradient scaler instance to save
+ """
+ # Model states
+ for i, state in enumerate(model_states):
+ weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin"
+ output_model_file = os.path.join(output_dir, weights_name)
+ save(state, output_model_file)
+ logger.info(f"Model weights saved in {output_model_file}")
+ # Optimizer states
+ for i, opt in enumerate(optimizers):
+ state = opt.state_dict()
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+ output_optimizer_file = os.path.join(output_dir, optimizer_name)
+ save(state, output_optimizer_file)
+ logger.info(f"Optimizer state saved in {output_optimizer_file}")
+ # GradScaler state
+ if scaler is not None:
+ state = scaler.state_dict()
+ output_scaler_file = os.path.join(output_dir, SCALER_NAME)
+ torch.save(state, output_scaler_file)
+ logger.info(f"Gradient scaler state saved in {output_scaler_file}")
+ # Random number generator states
+ states = {}
+ states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
+ states["random_state"] = random.getstate()
+ states["numpy_random_seed"] = np.random.get_state()
+ states["torch_manual_seed"] = torch.get_rng_state()
+ states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
+ # ^^ safe to call this function even if cuda is not available
+ if is_tpu_available():
+ states["xm_seed"] = torch.tensor(xm.get_rng_state())
+ output_states_file = os.path.join(output_dir, states_name)
+ torch.save(states, output_states_file)
+ logger.info(f"Random states saved in {output_states_file}")
+ return output_dir
+
+
+def load_accelerator_state(input_dir, models, optimizers, process_index, scaler=None):
+ """
+ Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
+
+ Args:
+ input_dir (:obj:`str` or :obj:`os.PathLike`):
+ The name of the folder to load all relevant weights and states.
+ model_stmodelsates (:obj:`List[torch.nn.Module]`):
+ A list of model instances
+ optimizers (:obj:`List[torch.optim.Optimizer]`):
+ A list of optimizer instances
+ process_index (:obj:`int`):
+ The current process index in the Accelerator state
+ scaler (:obj:`torch.cuda.amp.GradScaler`, `optional`):
+ An optional `GradScaler` instance to load
+ """
+ # Model states
+ for i, model in enumerate(models):
+ weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin"
+ input_model_file = os.path.join(input_dir, weights_name)
+ models[i].load_state_dict(torch.load(input_model_file))
+ logger.info("All model weights loaded successfully")
+
+ # Optimizer states
+ for i, opt in enumerate(optimizers):
+ optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+ input_optimizer_file = os.path.join(input_dir, optimizer_name)
+ optimizers[i].load_state_dict(torch.load(input_optimizer_file))
+ logger.info("All optimizer states loaded successfully")
+
+ # GradScaler state
+ if scaler is not None:
+ input_scaler_file = os.path.join(input_dir, SCALER_NAME)
+ scaler.load_state_dict(torch.load(input_scaler_file))
+ logger.info("GradScaler state loaded successfully")
+
+ # Random states
+ states = torch.load(os.path.join(input_dir, f"{RNG_STATE_NAME}_{process_index}.pkl"))
+ random.setstate(states["random_state"])
+ np.random.set_state(states["numpy_random_seed"])
+ torch.set_rng_state(states["torch_manual_seed"])
+ torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
+ # ^^ safe to call this function even if cuda is not available
+ if is_tpu_available():
+ xm.set_rng_state(states["xm_seed"])
+ logger.info("All random states loaded successfully")
diff --git a/tests/test_state_checkpointing.py b/tests/test_state_checkpointing.py
new file mode 100644
index 00000000000..a74dcb7247b
--- /dev/null
+++ b/tests/test_state_checkpointing.py
@@ -0,0 +1,125 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import random
+import tempfile
+import unittest
+
+import torch
+from torch import nn
+from torch.utils.data import DataLoader, TensorDataset
+
+from accelerate import Accelerator
+from accelerate.utils import set_seed
+
+
+logger = logging.getLogger(__name__)
+
+
+def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
+ "Generates a tuple of dummy DataLoaders to test with"
+
+ def get_dataset(n_batches):
+ x = torch.randn(batch_size * n_batches, 1)
+ return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
+
+ train_dataset = get_dataset(n_train_batches)
+ valid_dataset = get_dataset(n_valid_batches)
+ train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
+ valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
+ return (train_dataloader, valid_dataloader)
+
+
+def train(num_epochs, model, dataloader, optimizer, accelerator):
+ "Trains for `num_epochs`"
+ rands = []
+ for epoch in range(num_epochs):
+ # Train quickly
+ model.train()
+ for step, batch in enumerate(dataloader):
+ x, y = batch
+ outputs = model(x)
+ loss = torch.nn.functional.mse_loss(outputs, y)
+ accelerator.backward(loss)
+ optimizer.step()
+ optimizer.zero_grad()
+ rands.append(random.random()) # Introduce some randomness
+ return rands
+
+
+class DummyModel(nn.Module):
+ "Simple model to do y=mx+b"
+
+ def __init__(self):
+ super().__init__()
+ self.a = nn.Parameter(torch.randn(1))
+ self.b = nn.Parameter(torch.randn(1))
+
+ def forward(self, x):
+ return x * self.a + self.b
+
+
+class CheckpointTest(unittest.TestCase):
+ def test_can_resume_training(self):
+ with tempfile.TemporaryDirectory() as tmpdir:
+ set_seed(42)
+ model = DummyModel()
+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
+ train_dataloader, valid_dataloader = dummy_dataloaders()
+ # Train baseline
+ accelerator = Accelerator()
+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
+ model, optimizer, train_dataloader, valid_dataloader
+ )
+ # Save initial
+ initial = os.path.join(tmpdir, "initial")
+ accelerator.save_state(initial)
+ (a, b) = model.a.item(), model.b.item()
+ opt_state = optimizer.state_dict()
+ ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
+ (a1, b1) = model.a.item(), model.b.item()
+ opt_state1 = optimizer.state_dict()
+
+ # Train partially
+ set_seed(42)
+ model = DummyModel()
+ optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
+ train_dataloader, valid_dataloader = dummy_dataloaders()
+ accelerator = Accelerator()
+ model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
+ model, optimizer, train_dataloader, valid_dataloader
+ )
+ accelerator.load_state(initial)
+ (a2, b2) = model.a.item(), model.b.item()
+ opt_state2 = optimizer.state_dict()
+ self.assertEqual(a, a2)
+ self.assertEqual(b, b2)
+ self.assertEqual(opt_state, opt_state2)
+
+ test_rands = train(2, model, train_dataloader, optimizer, accelerator)
+ # Save everything
+ checkpoint = os.path.join(tmpdir, "checkpoint")
+ accelerator.save_state(checkpoint)
+
+ # Load everything back in and make sure all states work
+ accelerator.load_state(checkpoint)
+ test_rands += train(1, model, train_dataloader, optimizer, accelerator)
+ (a3, b3) = model.a.item(), model.b.item()
+ opt_state3 = optimizer.state_dict()
+ self.assertEqual(a1, a3)
+ self.assertEqual(b1, b3)
+ self.assertEqual(opt_state1, opt_state3)
+ self.assertEqual(ground_truth_rands, test_rands)
| [
{
"components": [
{
"doc": "Saves the current states of the model, optimizer, scaler, and RNG generators.\n\nArgs:\n output_dir (:obj:`str` or :obj:`os.PathLike`):\n The name of the folder to save all relevant weights and states.",
"lines": [
565,
578
... | [
"tests/test_state_checkpointing.py::CheckpointTest::test_can_resume_training"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add in checkpointing capability
Closes https://github.com/huggingface/accelerate/issues/171
This PR adds in two functions, `Accelerator.save_state` and `Accelerator.load_state`, which will go through and checkpoint the current state of everything accelerator touches.
I've included a testing script, but this needs to be properly turned into a test, as currently it is just a script with a bunch of asserts. I have tested successfully on:
- Single GPU
- Single CPU
- Multi-GPU
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/accelerate/accelerator.py]
(definition of Accelerator.save_state:)
def save_state(self, output_dir: str):
"""Saves the current states of the model, optimizer, scaler, and RNG generators.
Args:
output_dir (:obj:`str` or :obj:`os.PathLike`):
The name of the folder to save all relevant weights and states."""
(definition of Accelerator.load_state:)
def load_state(self, input_dir: str):
"""Loads the current states of the model, optimizer, scaler, and RNG generators.
Args:
input_dir (:obj:`str` or :obj:`os.PathLike`):
The name of the folder all relevant weights and states were saved in."""
[end of new definitions in src/accelerate/accelerator.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 08101b9dde2b1a9658c2e363e3e9f5663ba06073 | ||
conan-io__conan-10608 | 10,608 | conan-io/conan | null | e661edaa4d8ccd0721525e1705f20178190c0f65 | 2022-02-17T17:11:38Z | diff --git a/conan/tools/apple/__init__.py b/conan/tools/apple/__init__.py
index 06c11573a73..b3a7cb5a388 100644
--- a/conan/tools/apple/__init__.py
+++ b/conan/tools/apple/__init__.py
@@ -6,3 +6,5 @@
# from conan.tools.apple.apple import apple_deployment_target_flag
# from conan.tools.apple.apple import to_apple_arch
from conan.tools.apple.xcodedeps import XcodeDeps
+from conan.tools.apple.xcodebuild import XcodeBuild
+from conan.tools.apple.xcodetoolchain import XcodeToolchain
diff --git a/conan/tools/apple/apple.py b/conan/tools/apple/apple.py
index c9b2544c7fa..13fbdafaef9 100644
--- a/conan/tools/apple/apple.py
+++ b/conan/tools/apple/apple.py
@@ -41,7 +41,8 @@ def apple_sdk_name(settings):
arch = settings.get_safe('arch')
os_ = settings.get_safe('os')
os_sdk = settings.get_safe('os.sdk')
- return os_sdk or _guess_apple_sdk_name(os_, arch)
+ os_sdk_version = settings.get_safe('os.sdk_version') or ""
+ return "{}{}".format(os_sdk, os_sdk_version) if os_sdk else _guess_apple_sdk_name(os_, arch)
def apple_min_version_flag(conanfile):
diff --git a/conan/tools/apple/xcodebuild.py b/conan/tools/apple/xcodebuild.py
new file mode 100644
index 00000000000..8eae6428d4c
--- /dev/null
+++ b/conan/tools/apple/xcodebuild.py
@@ -0,0 +1,37 @@
+import os
+
+from conan.tools.apple.apple import to_apple_arch
+from conans.errors import ConanException
+
+
+class XcodeBuild(object):
+ def __init__(self, conanfile):
+ self._conanfile = conanfile
+ self._build_type = conanfile.settings.get_safe("build_type")
+ self._arch = to_apple_arch(conanfile.settings.get_safe("arch"))
+ self._sdk = conanfile.settings.get_safe("os.sdk") or ""
+ self._sdk_version = conanfile.settings.get_safe("os.sdk_version") or ""
+
+ @property
+ def _verbosity(self):
+ verbosity = self._conanfile.conf.get("tools.apple.xcodebuild:verbosity", default="", check_type=str)
+ if verbosity == "quiet" or verbosity == "verbose":
+ return "-{}".format(verbosity)
+ elif verbosity:
+ raise ConanException("Value {} for 'tools.apple.xcodebuild:verbosity' is not valid".format(verbosity))
+ return ""
+
+ @property
+ def _sdkroot(self):
+ # User's sdk_path has priority, then if specified try to compose sdk argument
+ # with sdk/sdk_version settings, leave blank otherwise and the sdk will be automatically
+ # chosen by the build system
+ sdk = self._conanfile.conf.get("tools.apple:sdk_path")
+ if not sdk and self._sdk:
+ sdk = "{}{}".format(self._sdk, self._sdk_version)
+ return "SDKROOT={}".format(sdk) if sdk else ""
+
+ def build(self, xcodeproj):
+ cmd = "xcodebuild -project {} -configuration {} -arch {} " \
+ "{} {}".format(xcodeproj, self._build_type, self._arch, self._sdkroot, self._verbosity)
+ self._conanfile.run(cmd)
diff --git a/conan/tools/apple/xcodedeps.py b/conan/tools/apple/xcodedeps.py
index 2dd2a887842..64d618faf1a 100644
--- a/conan/tools/apple/xcodedeps.py
+++ b/conan/tools/apple/xcodedeps.py
@@ -8,22 +8,66 @@
from conans.util.files import load, save
from conan.tools.apple.apple import to_apple_arch
+GLOBAL_XCCONFIG_TEMPLATE = textwrap.dedent("""\
+ // Includes both the toolchain and the dependencies
+ // files if they exist
+
+ """)
+
+GLOBAL_XCCONFIG_FILENAME = "conan_config.xcconfig"
+
+
+def _xcconfig_settings_filename(settings):
+ arch = settings.get_safe("arch")
+ architecture = to_apple_arch(arch) or arch
+ props = [("configuration", settings.get_safe("build_type")),
+ ("architecture", architecture),
+ ("sdk name", settings.get_safe("os.sdk")),
+ ("sdk version", settings.get_safe("os.sdk_version"))]
+ name = "".join("_{}".format(v) for _, v in props if v is not None and v)
+ name = name.replace(".", "_").replace("-", "_")
+ return name.lower()
+
+
+def _xcconfig_conditional(settings):
+ sdk_condition = "*"
+ arch = settings.get_safe("arch")
+ architecture = to_apple_arch(arch) or arch
+ if settings.get_safe("os.sdk"):
+ sdk_condition = "{}{}".format(settings.get_safe("os.sdk"), settings.get_safe("os.sdk_version") or "*")
+
+ return "[config={}][arch={}][sdk={}]".format(settings.get_safe("build_type"), architecture, sdk_condition)
+
+
+def _add_include_to_file_or_create(filename, template, include):
+ if os.path.isfile(filename):
+ content = load(filename)
+ else:
+ content = template
+
+ if include not in content:
+ content = content + '#include "{}"\n'.format(include)
+
+ return content
+
class XcodeDeps(object):
+ general_name = "conandeps.xcconfig"
+
_vars_xconfig = textwrap.dedent("""\
// Definition of Conan variables for {{name}}
- CONAN_{{name}}_BINARY_DIRECTORIES[config={{configuration}}][arch={{architecture}}] = {{bin_dirs}}
- CONAN_{{name}}_C_COMPILER_FLAGS[config={{configuration}}][arch={{architecture}}] = {{c_compiler_flags}}
- CONAN_{{name}}_CXX_COMPILER_FLAGS[config={{configuration}}][arch={{architecture}}] = {{cxx_compiler_flags}}
- CONAN_{{name}}_LINKER_FLAGS[config={{configuration}}][arch={{architecture}}] = {{linker_flags}}
- CONAN_{{name}}_PREPROCESSOR_DEFINITIONS[config={{configuration}}][arch={{architecture}}] = {{definitions}}
- CONAN_{{name}}_INCLUDE_DIRECTORIES[config={{configuration}}][arch={{architecture}}] = {{include_dirs}}
- CONAN_{{name}}_RESOURCE_DIRECTORIES[config={{configuration}}][arch={{architecture}}] = {{res_dirs}}
- CONAN_{{name}}_LIBRARY_DIRECTORIES[config={{configuration}}][arch={{architecture}}] = {{lib_dirs}}
- CONAN_{{name}}_LIBRARIES[config={{configuration}}][arch={{architecture}}] = {{libs}}
- CONAN_{{name}}_SYSTEM_LIBS[config={{configuration}}][arch={{architecture}}] = {{system_libs}}
- CONAN_{{name}}_FRAMEWORKS_DIRECTORIES[config={{configuration}}][arch={{architecture}}] = {{frameworkdirs}}
- CONAN_{{name}}_FRAMEWORKS[config={{configuration}}][arch={{architecture}}] = {{frameworks}}
+ CONAN_{{name}}_BINARY_DIRECTORIES{{condition}} = {{bin_dirs}}
+ CONAN_{{name}}_C_COMPILER_FLAGS{{condition}} = {{c_compiler_flags}}
+ CONAN_{{name}}_CXX_COMPILER_FLAGS{{condition}} = {{cxx_compiler_flags}}
+ CONAN_{{name}}_LINKER_FLAGS{{condition}} = {{linker_flags}}
+ CONAN_{{name}}_PREPROCESSOR_DEFINITIONS{{condition}} = {{definitions}}
+ CONAN_{{name}}_INCLUDE_DIRECTORIES{{condition}} = {{include_dirs}}
+ CONAN_{{name}}_RESOURCE_DIRECTORIES{{condition}} = {{res_dirs}}
+ CONAN_{{name}}_LIBRARY_DIRECTORIES{{condition}} = {{lib_dirs}}
+ CONAN_{{name}}_LIBRARIES{{condition}} = {{libs}}
+ CONAN_{{name}}_SYSTEM_LIBS{{condition}} = {{system_libs}}
+ CONAN_{{name}}_FRAMEWORKS_DIRECTORIES{{condition}} = {{frameworkdirs}}
+ CONAN_{{name}}_FRAMEWORKS{{condition}} = {{frameworks}}
""")
_conf_xconfig = textwrap.dedent("""\
@@ -70,13 +114,11 @@ class XcodeDeps(object):
def __init__(self, conanfile):
self._conanfile = conanfile
self.configuration = conanfile.settings.get_safe("build_type")
-
arch = conanfile.settings.get_safe("arch")
self.architecture = to_apple_arch(arch) or arch
-
- # TODO: check if it makes sense to add a subsetting for sdk version
- # related to: https://github.com/conan-io/conan/issues/9608
self.os_version = conanfile.settings.get_safe("os.version")
+ self.sdk = conanfile.settings.get_safe("os.sdk")
+ self.sdk_version = conanfile.settings.get_safe("os.sdk_version")
check_using_build_profile(self._conanfile)
def generate(self):
@@ -88,24 +130,13 @@ def generate(self):
for generator_file, content in generator_files.items():
save(generator_file, content)
- def _config_filename(self):
- # Default name
- props = [("configuration", self.configuration),
- ("architecture", self.architecture)]
- name = "".join("_{}".format(v) for _, v in props if v is not None)
- return name.lower()
-
def _vars_xconfig_file(self, dep, name, cpp_info):
"""
- content for conan_vars_poco_x86_release.xcconfig, containing the variables
+ returns a .xcconfig file with the variables definition for one package for one configuration
"""
- # returns a .xcconfig file with the variables definition for one package for one configuration
- pkg_placeholder = "$(CONAN_{}_ROOT_FOLDER_{})/".format(name, self.configuration)
fields = {
'name': name,
- 'configuration': self.configuration,
- 'architecture': self.architecture,
'root_folder': dep.package_folder,
'bin_dirs': " ".join('"{}"'.format(os.path.join(dep.package_folder, p)) for p in cpp_info.bindirs),
'res_dirs': " ".join('"{}"'.format(os.path.join(dep.package_folder, p)) for p in cpp_info.resdirs),
@@ -120,6 +151,7 @@ def _vars_xconfig_file(self, dep, name, cpp_info):
'cxx_compiler_flags': " ".join(cpp_info.cxxflags),
'linker_flags': " ".join(cpp_info.sharedlinkflags),
'exe_flags': " ".join(cpp_info.exelinkflags),
+ 'condition': _xcconfig_conditional(self._conanfile.settings)
}
formatted_template = Template(self._vars_xconfig).render(**fields)
return formatted_template
@@ -128,11 +160,6 @@ def _conf_xconfig_file(self, dep_name, vars_xconfig_name):
"""
content for conan_poco_x86_release.xcconfig, containing the activation
"""
- # TODO: when it's more clear what to do with the sdk, add the condition for it and also
- # we are not taking into account the version for the sdk because we probably
- # want to model also the sdk version decoupled of the compiler version
- # for example XCode 13 is now using sdk=macosx11.3
- # related to: https://github.com/conan-io/conan/issues/9608
template = Template(self._conf_xconfig)
content_multi = template.render(name=dep_name, vars_filename=vars_xconfig_name)
return content_multi
@@ -166,10 +193,15 @@ def _all_xconfig_file(self, deps):
content_multi = content_multi + '\n#include "conan_{}.xcconfig"\n'.format(dep_name)
return content_multi
+ @property
+ def _global_xconfig_content(self):
+ return _add_include_to_file_or_create(GLOBAL_XCCONFIG_FILENAME,
+ GLOBAL_XCCONFIG_TEMPLATE,
+ self.general_name)
+
def _content(self):
result = {}
- general_name = "conandeps.xcconfig"
- conf_name = self._config_filename()
+ conf_name = _xcconfig_settings_filename(self._conanfile.settings)
for dep in self._conanfile.dependencies.host.values():
dep_name = dep.ref.name
@@ -191,6 +223,8 @@ def _content(self):
# Include all direct build_requires for host context.
direct_deps = self._conanfile.dependencies.filter({"direct": True, "build": False})
- result[general_name] = self._all_xconfig_file(direct_deps)
+ result[self.general_name] = self._all_xconfig_file(direct_deps)
+
+ result[GLOBAL_XCCONFIG_FILENAME] = self._global_xconfig_content
return result
diff --git a/conan/tools/apple/xcodetoolchain.py b/conan/tools/apple/xcodetoolchain.py
new file mode 100644
index 00000000000..7e830dd186a
--- /dev/null
+++ b/conan/tools/apple/xcodetoolchain.py
@@ -0,0 +1,92 @@
+import os
+import textwrap
+
+from conan.tools._check_build_profile import check_using_build_profile
+from conan.tools._compilers import cppstd_flag
+from conan.tools.apple.apple import to_apple_arch
+from conan.tools.apple.xcodedeps import GLOBAL_XCCONFIG_FILENAME, GLOBAL_XCCONFIG_TEMPLATE, \
+ _add_include_to_file_or_create, _xcconfig_settings_filename, _xcconfig_conditional
+from conans.util.files import save
+
+
+class XcodeToolchain(object):
+ filename = "conantoolchain"
+ extension = ".xcconfig"
+
+ _vars_xconfig = textwrap.dedent("""\
+ // Definition of toolchain variables
+ {macosx_deployment_target}
+ {clang_cxx_library}
+ {clang_cxx_language_standard}
+ """)
+
+ _agreggated_xconfig = textwrap.dedent("""\
+ // Conan XcodeToolchain generated file
+ // Includes all installed configurations
+
+ """)
+
+ def __init__(self, conanfile):
+ self._conanfile = conanfile
+ arch = conanfile.settings.get_safe("arch")
+ self.architecture = to_apple_arch(arch) or arch
+ self.configuration = conanfile.settings.build_type
+ self.sdk = conanfile.settings.get_safe("os.sdk")
+ self.sdk_version = conanfile.settings.get_safe("os.sdk_version")
+ self.libcxx = conanfile.settings.get_safe("compiler.libcxx")
+ self.os_version = conanfile.settings.get_safe("os.version")
+ check_using_build_profile(self._conanfile)
+
+ def generate(self):
+ save(GLOBAL_XCCONFIG_FILENAME, self._global_xconfig_content)
+ save(self._agreggated_xconfig_filename, self._agreggated_xconfig_content)
+ save(self._vars_xconfig_filename, self._vars_xconfig_content)
+
+ @property
+ def _cppstd(self):
+ cppstd = cppstd_flag(self._conanfile.settings)
+ if cppstd.startswith("-std="):
+ return cppstd[5:]
+ return cppstd
+
+ @property
+ def _macosx_deployment_target(self):
+ return 'MACOSX_DEPLOYMENT_TARGET{}={}'.format(_xcconfig_conditional(self._conanfile.settings),
+ self.os_version) if self.os_version else ""
+
+ @property
+ def _clang_cxx_library(self):
+ return 'CLANG_CXX_LIBRARY{}={}'.format(_xcconfig_conditional(self._conanfile.settings),
+ self.libcxx) if self.libcxx else ""
+
+ @property
+ def _clang_cxx_language_standard(self):
+ return 'CLANG_CXX_LANGUAGE_STANDARD{}={}'.format(_xcconfig_conditional(self._conanfile.settings),
+ self._cppstd) if self._cppstd else ""
+ @property
+ def _vars_xconfig_filename(self):
+ return "conantoolchain{}{}".format(_xcconfig_settings_filename(self._conanfile.settings),
+ self.extension)
+
+ @property
+ def _vars_xconfig_content(self):
+ ret = self._vars_xconfig.format(macosx_deployment_target=self._macosx_deployment_target,
+ clang_cxx_library=self._clang_cxx_library,
+ clang_cxx_language_standard=self._clang_cxx_language_standard)
+ return ret
+
+ @property
+ def _agreggated_xconfig_content(self):
+ return _add_include_to_file_or_create(self._agreggated_xconfig_filename,
+ self._agreggated_xconfig,
+ self._vars_xconfig_filename)
+
+ @property
+ def _global_xconfig_content(self):
+ return _add_include_to_file_or_create(GLOBAL_XCCONFIG_FILENAME,
+ GLOBAL_XCCONFIG_TEMPLATE,
+ self._agreggated_xconfig_filename)
+
+ @property
+ def _agreggated_xconfig_filename(self):
+ return self.filename + self.extension
diff --git a/conan/tools/cmake/toolchain/blocks.py b/conan/tools/cmake/toolchain/blocks.py
index 8f82981bd9d..1d2919aaed4 100644
--- a/conan/tools/cmake/toolchain/blocks.py
+++ b/conan/tools/cmake/toolchain/blocks.py
@@ -349,17 +349,26 @@ def _get_architecture(self):
def _apple_sdk_name(self):
"""
- Returns the 'os.sdk' (SDK name) field value. Every user should specify it because
- there could be several ones depending on the OS architecture.
+ Returns the value for the SDKROOT with this preference:
+ - 1. The full path set in the conf with tools.apple:sdk_path
+ - 2. osd.sdk + os.sdk_version
+ Otherwise None
+ Every user should specify it because there could be several ones depending
+ on the OS architecture.
Note: In case of MacOS it'll be the same for all the architectures.
"""
os_ = self._conanfile.settings.get_safe('os')
os_sdk = self._conanfile.settings.get_safe('os.sdk')
- if os_sdk:
- return os_sdk
- elif os_ == "Macos": # it has only a single value for all the architectures for now
- return "macosx"
+ os_sdk_version = self._conanfile.settings.get_safe('os.sdk_version') or ""
+ sdk = self._conanfile.conf.get("tools.apple:sdk_path")
+
+ if sdk:
+ return sdk
+ elif os_ == "Macos": # if the host is Macos it can only be "macosx"
+ return "{}{}".format("macosx", os_sdk_version)
+ elif os_sdk:
+ return "{}{}".format(os_sdk, os_sdk_version)
else:
raise ConanException("Please, specify a suitable value for os.sdk.")
diff --git a/conans/client/conf/__init__.py b/conans/client/conf/__init__.py
index d9b16c6eb02..fddd5328837 100644
--- a/conans/client/conf/__init__.py
+++ b/conans/client/conf/__init__.py
@@ -37,6 +37,7 @@
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
+ sdk_version: [None, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1"]
subsystem: [None, catalyst]
Android:
api_level: ANY
@@ -46,15 +47,22 @@
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
"14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
sdk: [None, "iphoneos", "iphonesimulator"]
+ sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
+ "13.0", "13.1", "13.2", "13.4", "13.5", "13.6", "13.7",
+ "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
"7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
sdk: [None, "watchos", "watchsimulator"]
+ sdk_version: [None, "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
+ "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
"15.0", "15.1"]
sdk: [None, "appletvos", "appletvsimulator"]
+ sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
+ "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2"]
FreeBSD:
SunOS:
AIX:
diff --git a/conans/client/generators/__init__.py b/conans/client/generators/__init__.py
index 5b2be191b15..46629495587 100644
--- a/conans/client/generators/__init__.py
+++ b/conans/client/generators/__init__.py
@@ -72,7 +72,7 @@ def __init__(self):
"MesonToolchain", "MSBuildDeps", "QbsToolchain", "msbuild",
"VirtualRunEnv", "VirtualBuildEnv", "AutotoolsDeps",
"AutotoolsToolchain", "BazelDeps", "BazelToolchain", "PkgConfigDeps",
- "VCVars", "IntelCC", "XcodeDeps", "PremakeDeps"]
+ "VCVars", "IntelCC", "XcodeDeps", "PremakeDeps", "XcodeToolchain"]
def add(self, name, generator_class, custom=False):
if name not in self._generators or custom:
@@ -143,6 +143,9 @@ def _new_generator(self, generator_name, output):
elif generator_name == "PremakeDeps":
from conan.tools.premake import PremakeDeps
return PremakeDeps
+ elif generator_name == "XcodeToolchain":
+ from conan.tools.apple import XcodeToolchain
+ return XcodeToolchain
else:
raise ConanException("Internal Conan error: Generator '{}' "
"not commplete".format(generator_name))
diff --git a/conans/client/migrations_settings.py b/conans/client/migrations_settings.py
index 04e28167fe1..9b851c660dc 100644
--- a/conans/client/migrations_settings.py
+++ b/conans/client/migrations_settings.py
@@ -3132,6 +3132,7 @@
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
+ sdk_version: [None, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1"]
subsystem: [None, catalyst]
Android:
api_level: ANY
@@ -3141,15 +3142,22 @@
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
"14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
sdk: [None, "iphoneos", "iphonesimulator"]
+ sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
+ "13.0", "13.1", "13.2", "13.4", "13.5", "13.6", "13.7",
+ "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
"7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
sdk: [None, "watchos", "watchsimulator"]
+ sdk_version: [None, "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
+ "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
"15.0", "15.1"]
sdk: [None, "appletvos", "appletvsimulator"]
+ sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
+ "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2"]
FreeBSD:
SunOS:
AIX:
diff --git a/conans/model/conf.py b/conans/model/conf.py
index dd4f936e335..c82efce872f 100644
--- a/conans/model/conf.py
+++ b/conans/model/conf.py
@@ -40,6 +40,7 @@
"tools.system.package_manager:mode": "Mode for package_manager tools: 'check' or 'install'",
"tools.system.package_manager:sudo": "Use 'sudo' when invoking the package manager tools in Linux (False by default)",
"tools.system.package_manager:sudo_askpass": "Use the '-A' argument if using sudo in Linux to invoke the system package manager (False by default)",
+ "tools.apple.xcodebuild:verbosity": "Verbosity level for xcodebuild: 'verbose' or 'quiet"
}
| diff --git a/conans/test/functional/toolchains/apple/test_xcodebuild.py b/conans/test/functional/toolchains/apple/test_xcodebuild.py
new file mode 100644
index 00000000000..8713f6594a3
--- /dev/null
+++ b/conans/test/functional/toolchains/apple/test_xcodebuild.py
@@ -0,0 +1,435 @@
+import platform
+import textwrap
+
+import pytest
+
+from conans.test.utils.tools import TestClient
+
+pbxproj = textwrap.dedent("""
+ // !$*UTF8*$!
+ {
+ archiveVersion = 1;
+ classes = {
+ };
+ objectVersion = 55;
+ objects = {
+
+ /* Begin PBXBuildFile section */
+ 07879B4027219EE500B6FB51 /* main.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 07879B3F27219EE500B6FB51 /* main.cpp */; };
+ /* End PBXBuildFile section */
+
+ /* Begin PBXCopyFilesBuildPhase section */
+ 07879B3A27219EE500B6FB51 /* CopyFiles */ = {
+ isa = PBXCopyFilesBuildPhase;
+ buildActionMask = 2147483647;
+ dstPath = /usr/share/man/man1/;
+ dstSubfolderSpec = 0;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 1;
+ };
+ /* End PBXCopyFilesBuildPhase section */
+
+ /* Begin PBXFileReference section */
+ 07879B3C27219EE500B6FB51 /* app */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = app; sourceTree = BUILT_PRODUCTS_DIR; };
+ 07879B3F27219EE500B6FB51 /* main.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = main.cpp; sourceTree = "<group>"; };
+ 41608B0827BBEB2800527FAA /* conandeps.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = conandeps.xcconfig; path = conandeps.xcconfig; sourceTree = SOURCE_ROOT; };
+ 41608B0927BBEB2800527FAA /* conan_hello_vars_release_x86_64.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = conan_hello_vars_release_x86_64.xcconfig; path = conan_hello_vars_release_x86_64.xcconfig; sourceTree = SOURCE_ROOT; };
+ 41608B0A27BBEB2900527FAA /* conan_hello_debug_x86_64.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = conan_hello_debug_x86_64.xcconfig; path = conan_hello_debug_x86_64.xcconfig; sourceTree = SOURCE_ROOT; };
+ 41608B0B27BBEB2900527FAA /* conan_hello.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = conan_hello.xcconfig; path = conan_hello.xcconfig; sourceTree = SOURCE_ROOT; };
+ 41608B0C27BBEB2900527FAA /* conan_hello_vars_debug_x86_64.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = conan_hello_vars_debug_x86_64.xcconfig; path = conan_hello_vars_debug_x86_64.xcconfig; sourceTree = SOURCE_ROOT; };
+ 41608B0D27BBEB2900527FAA /* conan_hello_release_x86_64.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = conan_hello_release_x86_64.xcconfig; path = conan_hello_release_x86_64.xcconfig; sourceTree = SOURCE_ROOT; };
+ /* End PBXFileReference section */
+
+ /* Begin PBXFrameworksBuildPhase section */
+ 07879B3927219EE500B6FB51 /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ /* End PBXFrameworksBuildPhase section */
+
+ /* Begin PBXGroup section */
+ 07879B3327219EE500B6FB51 = {
+ isa = PBXGroup;
+ children = (
+ 07879B3E27219EE500B6FB51 /* app */,
+ 07879B3D27219EE500B6FB51 /* Products */,
+ );
+ sourceTree = "<group>";
+ };
+ 07879B3D27219EE500B6FB51 /* Products */ = {
+ isa = PBXGroup;
+ children = (
+ 07879B3C27219EE500B6FB51 /* app */,
+ );
+ name = Products;
+ sourceTree = "<group>";
+ };
+ 07879B3E27219EE500B6FB51 /* app */ = {
+ isa = PBXGroup;
+ children = (
+ 41608B0A27BBEB2900527FAA /* conan_hello_debug_x86_64.xcconfig */,
+ 41608B0D27BBEB2900527FAA /* conan_hello_release_x86_64.xcconfig */,
+ 41608B0C27BBEB2900527FAA /* conan_hello_vars_debug_x86_64.xcconfig */,
+ 41608B0927BBEB2800527FAA /* conan_hello_vars_release_x86_64.xcconfig */,
+ 41608B0B27BBEB2900527FAA /* conan_hello.xcconfig */,
+ 41608B0827BBEB2800527FAA /* conandeps.xcconfig */,
+ 07879B3F27219EE500B6FB51 /* main.cpp */,
+ );
+ path = app;
+ sourceTree = "<group>";
+ };
+ /* End PBXGroup section */
+
+ /* Begin PBXNativeTarget section */
+ 07879B3B27219EE500B6FB51 /* app */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = 07879B4327219EE500B6FB51 /* Build configuration list for PBXNativeTarget "app" */;
+ buildPhases = (
+ 07879B3827219EE500B6FB51 /* Sources */,
+ 07879B3927219EE500B6FB51 /* Frameworks */,
+ 07879B3A27219EE500B6FB51 /* CopyFiles */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = app;
+ productName = app;
+ productReference = 07879B3C27219EE500B6FB51 /* app */;
+ productType = "com.apple.product-type.tool";
+ };
+ /* End PBXNativeTarget section */
+
+ /* Begin PBXProject section */
+ 07879B3427219EE500B6FB51 /* Project object */ = {
+ isa = PBXProject;
+ attributes = {
+ BuildIndependentTargetsInParallel = 1;
+ LastUpgradeCheck = 1320;
+ TargetAttributes = {
+ 07879B3B27219EE500B6FB51 = {
+ CreatedOnToolsVersion = 13.0;
+ };
+ };
+ };
+ buildConfigurationList = 07879B3727219EE500B6FB51 /* Build configuration list for PBXProject "app" */;
+ compatibilityVersion = "Xcode 13.0";
+ developmentRegion = en;
+ hasScannedForEncodings = 0;
+ knownRegions = (
+ en,
+ Base,
+ );
+ mainGroup = 07879B3327219EE500B6FB51;
+ productRefGroup = 07879B3D27219EE500B6FB51 /* Products */;
+ projectDirPath = "";
+ projectRoot = "";
+ targets = (
+ 07879B3B27219EE500B6FB51 /* app */,
+ );
+ };
+ /* End PBXProject section */
+
+ /* Begin PBXSourcesBuildPhase section */
+ 07879B3827219EE500B6FB51 /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 07879B4027219EE500B6FB51 /* main.cpp in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ /* End PBXSourcesBuildPhase section */
+
+ /* Begin XCBuildConfiguration section */
+ 07879B4127219EE500B6FB51 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 41608B0827BBEB2800527FAA /* conandeps.xcconfig */;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ CLANG_ANALYZER_NONNULL = YES;
+ CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++17";
+ CLANG_CXX_LIBRARY = "libc++";
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_ENABLE_OBJC_WEAK = YES;
+ CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_COMMA = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
+ CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
+ CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
+ CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
+ CLANG_WARN_STRICT_PROTOTYPES = YES;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ COPY_PHASE_STRIP = NO;
+ DEBUG_INFORMATION_FORMAT = dwarf;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ ENABLE_TESTABILITY = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu11;
+ GCC_DYNAMIC_NO_PIC = NO;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_OPTIMIZATION_LEVEL = 0;
+ GCC_PREPROCESSOR_DEFINITIONS = (
+ "DEBUG=1",
+ "$(inherited)",
+ );
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ MACOSX_DEPLOYMENT_TARGET = 11.3;
+ MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
+ MTL_FAST_MATH = YES;
+ ONLY_ACTIVE_ARCH = YES;
+ SDKROOT = macosx;
+ };
+ name = Debug;
+ };
+ 07879B4227219EE500B6FB51 /* Release */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 41608B0827BBEB2800527FAA /* conandeps.xcconfig */;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ CLANG_ANALYZER_NONNULL = YES;
+ CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
+ CLANG_CXX_LANGUAGE_STANDARD = "gnu++17";
+ CLANG_CXX_LIBRARY = "libc++";
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_ENABLE_OBJC_WEAK = YES;
+ CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_COMMA = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
+ CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
+ CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
+ CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
+ CLANG_WARN_STRICT_PROTOTYPES = YES;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ COPY_PHASE_STRIP = NO;
+ DEBUG_INFORMATION_FORMAT = dwarf;
+ ENABLE_NS_ASSERTIONS = NO;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu11;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ MACOSX_DEPLOYMENT_TARGET = 11.3;
+ MTL_ENABLE_DEBUG_INFO = NO;
+ MTL_FAST_MATH = YES;
+ SDKROOT = macosx;
+ };
+ name = Release;
+ };
+ 07879B4427219EE500B6FB51 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 41608B0827BBEB2800527FAA /* conandeps.xcconfig */;
+ buildSettings = {
+ CODE_SIGN_IDENTITY = "-";
+ CODE_SIGN_STYLE = Automatic;
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ };
+ name = Debug;
+ };
+ 07879B4527219EE500B6FB51 /* Release */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 41608B0827BBEB2800527FAA /* conandeps.xcconfig */;
+ buildSettings = {
+ CODE_SIGN_IDENTITY = "-";
+ CODE_SIGN_STYLE = Automatic;
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ };
+ name = Release;
+ };
+ /* End XCBuildConfiguration section */
+
+ /* Begin XCConfigurationList section */
+ 07879B3727219EE500B6FB51 /* Build configuration list for PBXProject "app" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 07879B4127219EE500B6FB51 /* Debug */,
+ 07879B4227219EE500B6FB51 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ 07879B4327219EE500B6FB51 /* Build configuration list for PBXNativeTarget "app" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 07879B4427219EE500B6FB51 /* Debug */,
+ 07879B4527219EE500B6FB51 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ /* End XCConfigurationList section */
+ };
+ rootObject = 07879B3427219EE500B6FB51 /* Project object */;
+ }
+ """)
+
+main = textwrap.dedent("""
+ #include <iostream>
+ #include "hello.h"
+ int main(int argc, char *argv[]) {
+ hello();
+ #ifndef DEBUG
+ std::cout << "App Release!" << std::endl;
+ #else
+ std::cout << "App Debug!" << std::endl;
+ #endif
+ }
+ """)
+
+test = textwrap.dedent("""
+ import os
+ from conans import ConanFile, tools
+ class TestApp(ConanFile):
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "VirtualRunEnv"
+ def test(self):
+ if not tools.cross_building(self):
+ self.run("app", env="conanrun")
+ """)
+
+
+@pytest.fixture(scope="module")
+def client():
+ client = TestClient()
+ client.run("new hello/0.1 -m=cmake_lib")
+ client.run("create . -s build_type=Release")
+ client.run("create . -s build_type=Debug")
+ return client
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
+@pytest.mark.tool_xcodebuild
+def test_project_xcodebuild(client):
+
+ conanfile = textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.apple import XcodeBuild
+ class MyApplicationConan(ConanFile):
+ name = "myapplication"
+ version = "1.0"
+ requires = "hello/0.1"
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "XcodeDeps"
+ exports_sources = "app.xcodeproj/*", "app/*"
+ def build(self):
+ xcode = XcodeBuild(self)
+ xcode.build("app.xcodeproj")
+
+ def package(self):
+ self.copy("*/app", dst="bin", src=".", keep_path=False)
+
+ def package_info(self):
+ self.cpp_info.bindirs = ["bin"]
+ """)
+
+ client.save({"conanfile.py": conanfile,
+ "test_package/conanfile.py": test,
+ "app/main.cpp": main,
+ "app.xcodeproj/project.pbxproj": pbxproj}, clean_first=True)
+ client.run("create . --build=missing")
+ assert "hello/0.1: Hello World Release!" in client.out
+ assert "App Release!" in client.out
+ client.run("create . -s build_type=Debug --build=missing")
+ assert "hello/0.1: Hello World Debug!" in client.out
+ assert "App Debug!" in client.out
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
+@pytest.mark.tool_xcodebuild
+@pytest.mark.skip(reason="Different sdks not installed in CI")
+def test_xcodebuild_test_different_sdk(client):
+
+ conanfile = textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.apple import XcodeBuild
+ class MyApplicationConan(ConanFile):
+ name = "myapplication"
+ version = "1.0"
+ requires = "hello/0.1"
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "XcodeDeps"
+ exports_sources = "app.xcodeproj/*", "app/*"
+ def build(self):
+ xcode = XcodeBuild(self)
+ xcode.build("app.xcodeproj")
+ self.run("otool -l build/Release/app")
+ """)
+
+ client.save({"conanfile.py": conanfile,
+ "app/main.cpp": main,
+ "app.xcodeproj/project.pbxproj": pbxproj}, clean_first=True)
+ client.run("create . --build=missing -s os.sdk=macosx -s os.sdk_version=10.15 "
+ "-c tools.apple:sdk_path='/Applications/Xcode11.7.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.15.sdk'")
+ assert "sdk 10.15.6" in client.out
+ client.run("create . --build=missing -s os.sdk=macosx -s os.sdk_version=11.3 "
+ "-c tools.apple:sdk_path='/Applications/Xcode12.5.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk'")
+ assert "sdk 11.3" in client.out
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
+@pytest.mark.tool_xcodebuild
+def test_missing_sdk(client):
+
+ conanfile = textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.apple import XcodeBuild
+ class MyApplicationConan(ConanFile):
+ name = "myapplication"
+ version = "1.0"
+ requires = "hello/0.1"
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "XcodeDeps"
+ exports_sources = "app.xcodeproj/*", "app/*"
+ def build(self):
+ xcode = XcodeBuild(self)
+ xcode.build("app.xcodeproj")
+ """)
+
+ client.save({"conanfile.py": conanfile,
+ "app/main.cpp": main,
+ "app.xcodeproj/project.pbxproj": pbxproj}, clean_first=True)
+ client.run("create . --build=missing -s os.sdk=macosx -s os.sdk_version=12.0 "
+ "-c tools.apple:sdk_path=notexistingsdk", assert_error=True)
diff --git a/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py b/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py
index ca40fab3004..818a5cfd5a6 100644
--- a/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py
+++ b/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py
@@ -231,7 +231,7 @@ def create_xcode_project(client, project_name, source):
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
- DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+ DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
@@ -300,7 +300,8 @@ def create_xcode_project(client, project_name, source):
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
-@pytest.mark.tool_cmake()
+@pytest.mark.tool_cmake
+@pytest.mark.tool_xcodebuild
def test_xcodedeps_build_configurations():
client = TestClient(path_with_spaces=False)
@@ -342,7 +343,8 @@ def test_xcodedeps_build_configurations():
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
-@pytest.mark.tool_cmake()
+@pytest.mark.tool_cmake
+@pytest.mark.tool_xcodebuild
def test_frameworks():
client = TestClient(path_with_spaces=False)
@@ -372,6 +374,7 @@ def test_frameworks():
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
+@pytest.mark.tool_xcodebuild
def test_xcodedeps_dashes_names_and_arch():
# https://github.com/conan-io/conan/issues/9949
client = TestClient(path_with_spaces=False)
diff --git a/conans/test/functional/toolchains/apple/test_xcodetoolchain.py b/conans/test/functional/toolchains/apple/test_xcodetoolchain.py
new file mode 100644
index 00000000000..29772e794d8
--- /dev/null
+++ b/conans/test/functional/toolchains/apple/test_xcodetoolchain.py
@@ -0,0 +1,382 @@
+import platform
+import textwrap
+
+import pytest
+
+from conans.test.assets.sources import gen_function_cpp
+from conans.test.utils.tools import TestClient
+
+pbxproj = textwrap.dedent("""
+ // !$*UTF8*$!
+ {
+ archiveVersion = 1;
+ classes = {
+ };
+ objectVersion = 55;
+ objects = {
+
+ /* Begin PBXBuildFile section */
+ 07879B4027219EE500B6FB51 /* main.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 07879B3F27219EE500B6FB51 /* main.cpp */; };
+ /* End PBXBuildFile section */
+
+ /* Begin PBXCopyFilesBuildPhase section */
+ 07879B3A27219EE500B6FB51 /* CopyFiles */ = {
+ isa = PBXCopyFilesBuildPhase;
+ buildActionMask = 2147483647;
+ dstPath = /usr/share/man/man1/;
+ dstSubfolderSpec = 0;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 1;
+ };
+ /* End PBXCopyFilesBuildPhase section */
+
+ /* Begin PBXFileReference section */
+ 07879B3C27219EE500B6FB51 /* app */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = app; sourceTree = BUILT_PRODUCTS_DIR; };
+ 07879B3F27219EE500B6FB51 /* main.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = main.cpp; sourceTree = "<group>"; };
+ 4130DB5F27BE8D0300BDEE84 /* conan_hello_release_x86_64_macosx_12_1.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_hello_release_x86_64_macosx_12_1.xcconfig; sourceTree = "<group>"; };
+ 4130DB6027BE8D0300BDEE84 /* conan_hello_vars_debug_x86_64_macosx_12_1.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_hello_vars_debug_x86_64_macosx_12_1.xcconfig; sourceTree = "<group>"; };
+ 4130DB6127BE8D0300BDEE84 /* conantoolchain_debug_x86_64_macosx_12_1.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conantoolchain_debug_x86_64_macosx_12_1.xcconfig; sourceTree = "<group>"; };
+ 4130DB6227BE8D0300BDEE84 /* conantoolchain_release_x86_64_macosx_12_1.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conantoolchain_release_x86_64_macosx_12_1.xcconfig; sourceTree = "<group>"; };
+ 4130DB6327BE8D0300BDEE84 /* conan_hello.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_hello.xcconfig; sourceTree = "<group>"; };
+ 4130DB6427BE8D0300BDEE84 /* conan_hello_vars_release_x86_64_macosx_12_1.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_hello_vars_release_x86_64_macosx_12_1.xcconfig; sourceTree = "<group>"; };
+ 4130DB6527BE8D0300BDEE84 /* conan_hello_debug_x86_64_macosx_12_1.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_hello_debug_x86_64_macosx_12_1.xcconfig; sourceTree = "<group>"; };
+ 4130DB6627BE8D0300BDEE84 /* conan_config.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conan_config.xcconfig; sourceTree = "<group>"; };
+ 4130DB6727BE8D0300BDEE84 /* conantoolchain.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conantoolchain.xcconfig; sourceTree = "<group>"; };
+ 4130DB6827BE8D0300BDEE84 /* conandeps.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = conandeps.xcconfig; sourceTree = "<group>"; };
+ /* End PBXFileReference section */
+
+ /* Begin PBXFrameworksBuildPhase section */
+ 07879B3927219EE500B6FB51 /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ /* End PBXFrameworksBuildPhase section */
+
+ /* Begin PBXGroup section */
+ 07879B3327219EE500B6FB51 = {
+ isa = PBXGroup;
+ children = (
+ 4130DB6927BE8D0D00BDEE84 /* conan */,
+ 07879B3E27219EE500B6FB51 /* app */,
+ 07879B3D27219EE500B6FB51 /* Products */,
+ );
+ sourceTree = "<group>";
+ };
+ 07879B3D27219EE500B6FB51 /* Products */ = {
+ isa = PBXGroup;
+ children = (
+ 07879B3C27219EE500B6FB51 /* app */,
+ );
+ name = Products;
+ sourceTree = "<group>";
+ };
+ 07879B3E27219EE500B6FB51 /* app */ = {
+ isa = PBXGroup;
+ children = (
+ 07879B3F27219EE500B6FB51 /* main.cpp */,
+ );
+ path = app;
+ sourceTree = "<group>";
+ };
+ 4130DB6927BE8D0D00BDEE84 /* conan */ = {
+ isa = PBXGroup;
+ children = (
+ 4130DB6627BE8D0300BDEE84 /* conan_config.xcconfig */,
+ 4130DB6527BE8D0300BDEE84 /* conan_hello_debug_x86_64_macosx_12_1.xcconfig */,
+ 4130DB5F27BE8D0300BDEE84 /* conan_hello_release_x86_64_macosx_12_1.xcconfig */,
+ 4130DB6027BE8D0300BDEE84 /* conan_hello_vars_debug_x86_64_macosx_12_1.xcconfig */,
+ 4130DB6427BE8D0300BDEE84 /* conan_hello_vars_release_x86_64_macosx_12_1.xcconfig */,
+ 4130DB6327BE8D0300BDEE84 /* conan_hello.xcconfig */,
+ 4130DB6827BE8D0300BDEE84 /* conandeps.xcconfig */,
+ 4130DB6127BE8D0300BDEE84 /* conantoolchain_debug_x86_64_macosx_12_1.xcconfig */,
+ 4130DB6227BE8D0300BDEE84 /* conantoolchain_release_x86_64_macosx_12_1.xcconfig */,
+ 4130DB6727BE8D0300BDEE84 /* conantoolchain.xcconfig */,
+ );
+ name = conan;
+ sourceTree = "<group>";
+ };
+ /* End PBXGroup section */
+
+ /* Begin PBXNativeTarget section */
+ 07879B3B27219EE500B6FB51 /* app */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = 07879B4327219EE500B6FB51 /* Build configuration list for PBXNativeTarget "app" */;
+ buildPhases = (
+ 07879B3827219EE500B6FB51 /* Sources */,
+ 07879B3927219EE500B6FB51 /* Frameworks */,
+ 07879B3A27219EE500B6FB51 /* CopyFiles */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = app;
+ productName = app;
+ productReference = 07879B3C27219EE500B6FB51 /* app */;
+ productType = "com.apple.product-type.tool";
+ };
+ /* End PBXNativeTarget section */
+
+ /* Begin PBXProject section */
+ 07879B3427219EE500B6FB51 /* Project object */ = {
+ isa = PBXProject;
+ attributes = {
+ BuildIndependentTargetsInParallel = 1;
+ LastUpgradeCheck = 1320;
+ TargetAttributes = {
+ 07879B3B27219EE500B6FB51 = {
+ CreatedOnToolsVersion = 13.0;
+ };
+ };
+ };
+ buildConfigurationList = 07879B3727219EE500B6FB51 /* Build configuration list for PBXProject "app" */;
+ compatibilityVersion = "Xcode 13.0";
+ developmentRegion = en;
+ hasScannedForEncodings = 0;
+ knownRegions = (
+ en,
+ Base,
+ );
+ mainGroup = 07879B3327219EE500B6FB51;
+ productRefGroup = 07879B3D27219EE500B6FB51 /* Products */;
+ projectDirPath = "";
+ projectRoot = "";
+ targets = (
+ 07879B3B27219EE500B6FB51 /* app */,
+ );
+ };
+ /* End PBXProject section */
+
+ /* Begin PBXSourcesBuildPhase section */
+ 07879B3827219EE500B6FB51 /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 07879B4027219EE500B6FB51 /* main.cpp in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ /* End PBXSourcesBuildPhase section */
+
+ /* Begin XCBuildConfiguration section */
+ 07879B4127219EE500B6FB51 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 4130DB6627BE8D0300BDEE84 /* conan_config.xcconfig */;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ ARCHS = x86_64;
+ CLANG_ANALYZER_NONNULL = YES;
+ CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_ENABLE_OBJC_WEAK = YES;
+ CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_COMMA = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
+ CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
+ CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
+ CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
+ CLANG_WARN_STRICT_PROTOTYPES = YES;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ COPY_PHASE_STRIP = NO;
+ DEBUG_INFORMATION_FORMAT = dwarf;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ ENABLE_TESTABILITY = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu11;
+ GCC_DYNAMIC_NO_PIC = NO;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_OPTIMIZATION_LEVEL = 0;
+ GCC_PREPROCESSOR_DEFINITIONS = (
+ "DEBUG=1",
+ "$(inherited)",
+ );
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
+ MTL_FAST_MATH = YES;
+ ONLY_ACTIVE_ARCH = YES;
+ SDKROOT = macosx12.1;
+ };
+ name = Debug;
+ };
+ 07879B4227219EE500B6FB51 /* Release */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 4130DB6627BE8D0300BDEE84 /* conan_config.xcconfig */;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ ARCHS = x86_64;
+ CLANG_ANALYZER_NONNULL = YES;
+ CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
+ CLANG_ENABLE_MODULES = YES;
+ CLANG_ENABLE_OBJC_ARC = YES;
+ CLANG_ENABLE_OBJC_WEAK = YES;
+ CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
+ CLANG_WARN_BOOL_CONVERSION = YES;
+ CLANG_WARN_COMMA = YES;
+ CLANG_WARN_CONSTANT_CONVERSION = YES;
+ CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
+ CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
+ CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
+ CLANG_WARN_EMPTY_BODY = YES;
+ CLANG_WARN_ENUM_CONVERSION = YES;
+ CLANG_WARN_INFINITE_RECURSION = YES;
+ CLANG_WARN_INT_CONVERSION = YES;
+ CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
+ CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
+ CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
+ CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
+ CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
+ CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
+ CLANG_WARN_STRICT_PROTOTYPES = YES;
+ CLANG_WARN_SUSPICIOUS_MOVE = YES;
+ CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
+ CLANG_WARN_UNREACHABLE_CODE = YES;
+ CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
+ COPY_PHASE_STRIP = NO;
+ DEBUG_INFORMATION_FORMAT = dwarf;
+ ENABLE_NS_ASSERTIONS = NO;
+ ENABLE_STRICT_OBJC_MSGSEND = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu11;
+ GCC_NO_COMMON_BLOCKS = YES;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
+ GCC_WARN_UNDECLARED_SELECTOR = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
+ GCC_WARN_UNUSED_FUNCTION = YES;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ MTL_ENABLE_DEBUG_INFO = NO;
+ MTL_FAST_MATH = YES;
+ ONLY_ACTIVE_ARCH = YES;
+ SDKROOT = macosx12.1;
+ };
+ name = Release;
+ };
+ 07879B4427219EE500B6FB51 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ CODE_SIGN_IDENTITY = "-";
+ CODE_SIGN_STYLE = Automatic;
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ };
+ name = Debug;
+ };
+ 07879B4527219EE500B6FB51 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ CODE_SIGN_IDENTITY = "-";
+ CODE_SIGN_STYLE = Automatic;
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ };
+ name = Release;
+ };
+ /* End XCBuildConfiguration section */
+
+ /* Begin XCConfigurationList section */
+ 07879B3727219EE500B6FB51 /* Build configuration list for PBXProject "app" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 07879B4127219EE500B6FB51 /* Debug */,
+ 07879B4227219EE500B6FB51 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ 07879B4327219EE500B6FB51 /* Build configuration list for PBXNativeTarget "app" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 07879B4427219EE500B6FB51 /* Debug */,
+ 07879B4527219EE500B6FB51 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ /* End XCConfigurationList section */
+ };
+ rootObject = 07879B3427219EE500B6FB51 /* Project object */;
+ }
+ """)
+
+test = textwrap.dedent("""
+ import os
+ from conans import ConanFile, tools
+ class TestApp(ConanFile):
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "VirtualRunEnv"
+ def test(self):
+ if not tools.cross_building(self):
+ self.run("app", env="conanrun")
+ """)
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
+@pytest.mark.tool_xcodebuild
+@pytest.mark.parametrize("cppstd, cppstd_output, min_version", [
+ ("gnu14", "__cplusplus201402", "11.0"),
+ ("gnu17", "__cplusplus201703", "11.0"),
+ ("gnu17", "__cplusplus201703", "10.15")
+])
+def test_project_xcodetoolchain(cppstd, cppstd_output, min_version):
+
+ client = TestClient()
+ client.run("new hello/0.1 -m=cmake_lib")
+ client.run("export .")
+
+ conanfile = textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.apple import XcodeBuild
+ class MyApplicationConan(ConanFile):
+ name = "myapplication"
+ version = "1.0"
+ requires = "hello/0.1"
+ settings = "os", "compiler", "build_type", "arch"
+ generators = "XcodeDeps", "XcodeToolchain"
+ exports_sources = "app.xcodeproj/*", "app/*"
+ def build(self):
+ xcode = XcodeBuild(self)
+ xcode.build("app.xcodeproj")
+ self.run("otool -l build/{}/app".format(self.settings.build_type))
+
+ def package(self):
+ self.copy("*/app", dst="bin", src=".", keep_path=False)
+
+ def package_info(self):
+ self.cpp_info.bindirs = ["bin"]
+ """)
+
+ client.save({"conanfile.py": conanfile,
+ "test_package/conanfile.py": test,
+ "app/main.cpp": gen_function_cpp(name="main", includes=["hello"], calls=["hello"]),
+ "app.xcodeproj/project.pbxproj": pbxproj}, clean_first=True)
+
+ sdk_version = "11.3"
+ settings = "-s arch=x86_64 -s os.sdk=macosx -s os.sdk_version={} -s compiler.cppstd={} " \
+ "-s compiler.libcxx=libc++ -s os.version={} ".format(sdk_version, cppstd, min_version)
+
+ client.run("create . -s build_type=Release {} --build=missing".format(settings))
+ assert "main __x86_64__ defined" in client.out
+ assert "main {}".format(cppstd_output) in client.out
+ assert "minos {}".format(min_version) in client.out
+ assert "sdk {}".format(sdk_version) in client.out
+ assert "libc++" in client.out
diff --git a/conans/test/integration/toolchains/apple/test_xcodedeps.py b/conans/test/integration/toolchains/apple/test_xcodedeps.py
index 009717bedd8..dfa51264871 100644
--- a/conans/test/integration/toolchains/apple/test_xcodedeps.py
+++ b/conans/test/integration/toolchains/apple/test_xcodedeps.py
@@ -4,6 +4,7 @@
import pytest
from conans.test.assets.genconanfile import GenConanfile
+from conans.test.integration.toolchains.apple.test_xcodetoolchain import _get_filename
from conans.test.utils.tools import TestClient
_expected_dep_xconfig = [
@@ -17,18 +18,18 @@
]
_expected_vars_xconfig = [
- "CONAN_{name}_BINARY_DIRECTORIES[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_C_COMPILER_FLAGS[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_CXX_COMPILER_FLAGS[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_LINKER_FLAGS[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_PREPROCESSOR_DEFINITIONS[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_INCLUDE_DIRECTORIES[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_RESOURCE_DIRECTORIES[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_LIBRARY_DIRECTORIES[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_LIBRARIES[config={configuration}][arch={architecture}] = -l{name}",
- "CONAN_{name}_SYSTEM_LIBS[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_FRAMEWORKS_DIRECTORIES[config={configuration}][arch={architecture}] =",
- "CONAN_{name}_FRAMEWORKS[config={configuration}][arch={architecture}] = -framework framework_{name}"
+ "CONAN_{name}_BINARY_DIRECTORIES[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_C_COMPILER_FLAGS[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_CXX_COMPILER_FLAGS[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_LINKER_FLAGS[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_PREPROCESSOR_DEFINITIONS[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_INCLUDE_DIRECTORIES[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_RESOURCE_DIRECTORIES[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_LIBRARY_DIRECTORIES[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_LIBRARIES[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = -l{name}",
+ "CONAN_{name}_SYSTEM_LIBS[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_FRAMEWORKS_DIRECTORIES[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] =",
+ "CONAN_{name}_FRAMEWORKS[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = -framework framework_{name}"
]
_expected_conf_xconfig = [
@@ -43,16 +44,9 @@
]
-def get_name(configuration, architecture):
- props = [("configuration", configuration),
- ("architecture", architecture)]
- name = "".join("_{}".format(v) for _, v in props if v is not None)
- return name.lower()
-
-
-def expected_files(current_folder, configuration, architecture):
+def expected_files(current_folder, configuration, architecture, sdk, sdk_version):
files = []
- name = get_name(configuration, architecture)
+ name = _get_filename(configuration, architecture, sdk, sdk_version)
deps = ["hello", "goodbye"]
files.extend(
[os.path.join(current_folder, "conan_{}{}.xcconfig".format(dep, name)) for dep in deps])
@@ -62,11 +56,11 @@ def expected_files(current_folder, configuration, architecture):
return files
-def check_contents(client, deps, configuration, architecture):
+def check_contents(client, deps, configuration, architecture, sdk, sdk_version):
for dep_name in deps:
dep_xconfig = client.load("conan_{}.xcconfig".format(dep_name))
conf_name = "conan_{}{}.xcconfig".format(dep_name,
- get_name(configuration, architecture))
+ _get_filename(configuration, architecture, sdk, sdk_version))
assert '#include "{}"'.format(conf_name) in dep_xconfig
for var in _expected_dep_xconfig:
@@ -74,10 +68,11 @@ def check_contents(client, deps, configuration, architecture):
assert line in dep_xconfig
vars_name = "conan_{}_vars{}.xcconfig".format(dep_name,
- get_name(configuration, architecture))
+ _get_filename(configuration, architecture, sdk, sdk_version))
conan_vars = client.load(vars_name)
for var in _expected_vars_xconfig:
- line = var.format(name=dep_name, configuration=configuration, architecture=architecture)
+ line = var.format(name=dep_name, configuration=configuration, architecture=architecture,
+ sdk=sdk, sdk_version=sdk_version)
assert line in conan_vars
conan_conf = client.load(conf_name)
@@ -102,13 +97,16 @@ def test_generator_files():
for build_type in ["Release", "Debug"]:
- client.run("install . -g XcodeDeps -s build_type={} -s arch=x86_64 --build missing".format(build_type))
+ client.run("install . -g XcodeDeps -s build_type={} -s arch=x86_64 -s os.sdk=macosx -s os.sdk_version=12.1 --build missing".format(build_type))
- for config_file in expected_files(client.current_folder, build_type, "x86_64"):
+ for config_file in expected_files(client.current_folder, build_type, "x86_64", "macosx", "12.1"):
assert os.path.isfile(config_file)
conandeps = client.load("conandeps.xcconfig")
assert '#include "conan_hello.xcconfig"' in conandeps
assert '#include "conan_goodbye.xcconfig"' in conandeps
- check_contents(client, ["hello", "goodbye"], build_type, "x86_64")
+ conan_config = client.load("conan_config.xcconfig")
+ assert '#include "conandeps.xcconfig"' in conan_config
+
+ check_contents(client, ["hello", "goodbye"], build_type, "x86_64", "macosx", "12.1")
diff --git a/conans/test/integration/toolchains/apple/test_xcodetoolchain.py b/conans/test/integration/toolchains/apple/test_xcodetoolchain.py
new file mode 100644
index 00000000000..b38a8419ed1
--- /dev/null
+++ b/conans/test/integration/toolchains/apple/test_xcodetoolchain.py
@@ -0,0 +1,55 @@
+import platform
+
+import pytest
+
+from conans.test.utils.tools import TestClient
+
+
+def _get_filename(configuration, architecture, sdk, sdk_version):
+ props = [("configuration", configuration),
+ ("architecture", architecture),
+ ("sdk name", sdk),
+ ("sdk version", sdk_version)]
+ name = "".join("_{}".format(v) for _, v in props if v is not None and v)
+ name = name.replace(".", "_").replace("-", "_")
+ return name.lower()
+
+
+def _condition(configuration, architecture, sdk_name, sdk_version):
+ sdk = "{}{}".format(sdk_name, sdk_version or "*")
+ return "[config={}][arch={}][sdk={}]".format(configuration, architecture, sdk)
+
+
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
+@pytest.mark.parametrize("configuration, os_version, libcxx, cppstd, arch, sdk_name, sdk_version, clang_cppstd", [
+ ("Release", "", "", "", "x86_64", "", "", ""),
+ ("Release", "12.0", "libc++", "20", "x86_64", "", "", "c++2a"),
+ ("Debug", "12.0", "libc++", "20", "x86_64", "", "", "c++2a"),
+ ("Release", "12.0", "libc++", "20", "x86_64", "macosx", "11.3", "c++2a"),
+ ("Release", "12.0", "libc++", "20", "x86_64", "macosx", "", "c++2a"),
+])
+def test_toolchain_files(configuration, os_version, cppstd, libcxx, arch, sdk_name, sdk_version, clang_cppstd):
+ client = TestClient()
+ client.save({"conanfile.txt": "[generators]\nXcodeToolchain\n"})
+ cmd = "install . -s build_type={}".format(configuration)
+ cmd = cmd + " -s os.version={}".format(os_version) if os_version else cmd
+ cmd = cmd + " -s compiler.cppstd={}".format(cppstd) if cppstd else cmd
+ cmd = cmd + " -s os.sdk={}".format(sdk_name) if sdk_name else cmd
+ cmd = cmd + " -s os.sdk_version={}".format(sdk_version) if sdk_version else cmd
+ client.run(cmd)
+ filename = _get_filename(configuration, arch, sdk_name, sdk_version)
+ condition = _condition(configuration, arch, sdk_name, sdk_version)
+
+ toolchain_all = client.load("conantoolchain.xcconfig")
+ toolchain_vars = client.load("conantoolchain{}.xcconfig".format(filename))
+ conan_config = client.load("conan_config.xcconfig")
+
+ assert '#include "conantoolchain.xcconfig"' in conan_config
+ assert '#include "conantoolchain{}.xcconfig"'.format(filename) in toolchain_all
+
+ if libcxx:
+ assert 'CLANG_CXX_LIBRARY{}={}'.format(condition, libcxx) in toolchain_vars
+ if os_version:
+ assert 'MACOSX_DEPLOYMENT_TARGET{}={}'.format(condition, os_version) in toolchain_vars
+ if cppstd:
+ assert 'CLANG_CXX_LANGUAGE_STANDARD{}={}'.format(condition, clang_cppstd) in toolchain_vars
diff --git a/conans/test/unittests/client/tools/apple/test_xcodebuild.py b/conans/test/unittests/client/tools/apple/test_xcodebuild.py
new file mode 100644
index 00000000000..f5cdbbdabb8
--- /dev/null
+++ b/conans/test/unittests/client/tools/apple/test_xcodebuild.py
@@ -0,0 +1,63 @@
+import pytest
+
+from conan.tools.apple import XcodeBuild
+from conans.errors import ConanException
+from conans.model.conf import ConfDefinition
+from conans.test.utils.mocks import ConanFileMock, MockSettings
+
+
+@pytest.mark.parametrize("mode", ["quiet", "verbose", "invalid"])
+def test_verbosity(mode):
+ conanfile = ConanFileMock()
+ conf = ConfDefinition()
+ conf.loads("tools.apple.xcodebuild:verbosity={}".format(mode))
+ conanfile.conf = conf
+ conanfile.settings = MockSettings({})
+ xcodebuild = XcodeBuild(conanfile)
+ if mode != "invalid":
+ xcodebuild.build("app.xcodeproj")
+ assert "-{}".format(mode) in conanfile.command
+ else:
+ with pytest.raises(ConanException) as excinfo:
+ xcodebuild.build("app.xcodeproj")
+ assert "Value {} for 'tools.apple.xcodebuild:verbosity' is not valid".format(mode) == str(
+ excinfo.value)
+
+
+def test_sdk_path():
+ conanfile = ConanFileMock()
+ conf = ConfDefinition()
+ conf.loads("tools.apple:sdk_path=mypath")
+ conanfile.conf = conf
+ conanfile.settings = MockSettings({})
+ xcodebuild = XcodeBuild(conanfile)
+ xcodebuild.build("app.xcodeproj")
+ assert "SDKROOT=mypath " in conanfile.command
+
+
+def test_sdk():
+ conanfile = ConanFileMock()
+ conf = ConfDefinition()
+ conf.loads("tools.apple:sdk_path=mypath")
+ conanfile.conf = conf
+ conanfile.settings = MockSettings({"os": "Macos",
+ "os.sdk": "macosx"})
+ xcodebuild = XcodeBuild(conanfile)
+ xcodebuild.build("app.xcodeproj")
+ # sdk_path takes preference
+ assert "SDKROOT=mypath " in conanfile.command
+ conf = ConfDefinition()
+ conanfile.conf = conf
+ xcodebuild = XcodeBuild(conanfile)
+ xcodebuild.build("app.xcodeproj")
+ assert "SDKROOT=macosx " in conanfile.command
+ conanfile.settings = MockSettings({"os": "Macos",
+ "os.sdk": "macosx",
+ "os.sdk_version": "12.1"})
+ xcodebuild = XcodeBuild(conanfile)
+ xcodebuild.build("app.xcodeproj")
+ assert "SDKROOT=macosx12.1 " in conanfile.command
+ conanfile.settings = MockSettings({})
+ xcodebuild = XcodeBuild(conanfile)
+ xcodebuild.build("app.xcodeproj")
+ assert "SDKROOT" not in conanfile.command
| [
{
"components": [
{
"doc": "",
"lines": [
7,
37
],
"name": "XcodeBuild",
"signature": "class XcodeBuild(object):",
"type": "class"
},
{
"doc": "",
"lines": [
8,
13
],
"name":... | [
"conans/test/unittests/client/tools/apple/test_xcodebuild.py::test_verbosity[quiet]",
"conans/test/unittests/client/tools/apple/test_xcodebuild.py::test_verbosity[verbose]",
"conans/test/unittests/client/tools/apple/test_xcodebuild.py::test_verbosity[invalid]",
"conans/test/unittests/client/tools/apple/test_x... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add new sdk_version setting for Apple OS's and new XcodeBuild, XcodeToolchain tools
Changelog: Feature: Add `sdk_version` setting for `Macos`, `iOS`, `watchOS` and `tvOS`.
Changelog: Feature: Add new `XcodeBuild` build helper.
Changelog: Feature: Add new `XcodeToolchain` helper.
Docs: https://github.com/conan-io/docs/pull/2431
Closes: https://github.com/conan-io/conan/issues/9608
Superseeds: https://github.com/conan-io/conan/pull/9873
#TAGS: slow
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/apple/xcodebuild.py]
(definition of XcodeBuild:)
class XcodeBuild(object):
(definition of XcodeBuild.__init__:)
def __init__(self, conanfile):
(definition of XcodeBuild._verbosity:)
def _verbosity(self):
(definition of XcodeBuild._sdkroot:)
def _sdkroot(self):
(definition of XcodeBuild.build:)
def build(self, xcodeproj):
[end of new definitions in conan/tools/apple/xcodebuild.py]
[start of new definitions in conan/tools/apple/xcodedeps.py]
(definition of _xcconfig_settings_filename:)
def _xcconfig_settings_filename(settings):
(definition of _xcconfig_conditional:)
def _xcconfig_conditional(settings):
(definition of _add_include_to_file_or_create:)
def _add_include_to_file_or_create(filename, template, include):
(definition of XcodeDeps._global_xconfig_content:)
def _global_xconfig_content(self):
[end of new definitions in conan/tools/apple/xcodedeps.py]
[start of new definitions in conan/tools/apple/xcodetoolchain.py]
(definition of XcodeToolchain:)
class XcodeToolchain(object):
(definition of XcodeToolchain.__init__:)
def __init__(self, conanfile):
(definition of XcodeToolchain.generate:)
def generate(self):
(definition of XcodeToolchain._cppstd:)
def _cppstd(self):
(definition of XcodeToolchain._macosx_deployment_target:)
def _macosx_deployment_target(self):
(definition of XcodeToolchain._clang_cxx_library:)
def _clang_cxx_library(self):
(definition of XcodeToolchain._clang_cxx_language_standard:)
def _clang_cxx_language_standard(self):
(definition of XcodeToolchain._vars_xconfig_filename:)
def _vars_xconfig_filename(self):
(definition of XcodeToolchain._vars_xconfig_content:)
def _vars_xconfig_content(self):
(definition of XcodeToolchain._agreggated_xconfig_content:)
def _agreggated_xconfig_content(self):
(definition of XcodeToolchain._global_xconfig_content:)
def _global_xconfig_content(self):
(definition of XcodeToolchain._agreggated_xconfig_filename:)
def _agreggated_xconfig_filename(self):
[end of new definitions in conan/tools/apple/xcodetoolchain.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
Textualize__textual-293 | 293 | Textualize/textual | null | 0c7c7ac964be831c3b23b076c700d5122dea313a | 2022-02-17T11:09:01Z | diff --git a/docs/reference/tabs.md b/docs/reference/tabs.md
new file mode 100644
index 0000000000..2f1151c7a6
--- /dev/null
+++ b/docs/reference/tabs.md
@@ -0,0 +1,1 @@
+::: textual.widgets.tabs.Tabs
diff --git a/examples/dev_sandbox.py b/examples/dev_sandbox.py
deleted file mode 100644
index 8a43a203e1..0000000000
--- a/examples/dev_sandbox.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from rich.console import RenderableType
-from rich.panel import Panel
-
-from textual.app import App
-from textual.widget import Widget
-
-
-class PanelWidget(Widget):
- def render(self) -> RenderableType:
- return Panel("hello world!", title="Title")
-
-
-class BasicApp(App):
- """Sandbox application used for testing/development by Textual developers"""
-
- def on_load(self):
- """Bind keys here."""
- self.bind("tab", "toggle_class('#sidebar', '-active')")
- self.bind("a", "toggle_class('#header', '-visible')")
- self.bind("c", "toggle_class('#content', '-content-visible')")
- self.bind("d", "toggle_class('#footer', 'dim')")
-
- def on_mount(self):
- """Build layout here."""
- self.mount(
- header=Widget(),
- content=PanelWidget(),
- footer=Widget(),
- sidebar=Widget(),
- )
-
-
-BasicApp.run(css_file="dev_sandbox.scss", watch_css=True, log="textual.log")
diff --git a/examples/dev_sandbox.scss b/examples/dev_sandbox.scss
deleted file mode 100644
index 987c0f09ec..0000000000
--- a/examples/dev_sandbox.scss
+++ /dev/null
@@ -1,63 +0,0 @@
-/* CSS file for dev_sandbox.py */
-
-$text: #f0f0f0;
-$primary: #021720;
-$secondary:#95d52a;
-$background: #262626;
-
-$primary-style: $text on $background;
-$animation-speed: 500ms;
-$animation: offset $animation-speed in_out_cubic;
-
-App > View {
- docks: side=left/1;
- text: on $background;
-}
-
-Widget:hover {
- outline: heavy;
- text: bold !important;
-}
-
-#sidebar {
- text: $primary-style;
- dock: side;
- width: 30;
- offset-x: -100%;
- transition: $animation;
- border-right: outer $secondary;
-}
-
-#sidebar.-active {
- offset-x: 0;
-}
-
-#header {
- text: $text on $primary;
- height: 3;
- border-bottom: hkey $secondary;
-}
-
-#header.-visible {
- visibility: hidden;
-}
-
-#content {
- text: $text on $background;
- offset-y: -3;
-}
-
-#content.-content-visible {
- visibility: hidden;
-}
-
-#footer {
- opacity: 1;
- text: $text on $primary;
- height: 3;
- border-top: hkey $secondary;
-}
-
-#footer.dim {
- opacity: 0.5;
-}
diff --git a/sandbox/tabs.py b/sandbox/tabs.py
new file mode 100644
index 0000000000..1ad1434ffa
--- /dev/null
+++ b/sandbox/tabs.py
@@ -0,0 +1,147 @@
+from dataclasses import dataclass
+
+from rich.console import RenderableType
+from rich.padding import Padding
+from rich.rule import Rule
+
+from textual import events
+from textual.app import App
+from textual.widget import Widget
+from textual.widgets.tabs import Tabs, Tab
+
+
+class Hr(Widget):
+ def render(self) -> RenderableType:
+ return Rule()
+
+
+class Info(Widget):
+ DEFAULT_STYLES = "height: 2;"
+
+ def __init__(self, text: str) -> None:
+ super().__init__()
+ self.text = text
+
+ def render(self) -> RenderableType:
+ return Padding(f"{self.text}", pad=(0, 1))
+
+
+@dataclass
+class WidgetDescription:
+ description: str
+ widget: Widget
+
+
+class BasicApp(App):
+ """Sandbox application used for testing/development by Textual developers"""
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.keys_to_tabs = {
+ "1": Tab("January", name="one"),
+ "2": Tab("に月", name="two"),
+ "3": Tab("March", name="three"),
+ "4": Tab("April", name="four"),
+ "5": Tab("May", name="five"),
+ "6": Tab("And a really long tab!", name="six"),
+ }
+ tabs = list(self.keys_to_tabs.values())
+ self.examples = [
+ WidgetDescription(
+ "Customise the spacing between tabs, e.g. tab_padding=1",
+ Tabs(
+ tabs,
+ tab_padding=1,
+ ),
+ ),
+ WidgetDescription(
+ "Change the opacity of inactive tab text, e.g. inactive_text_opacity=.2",
+ Tabs(
+ tabs,
+ active_tab="two",
+ active_bar_style="#1493FF",
+ inactive_text_opacity=0.2,
+ tab_padding=2,
+ ),
+ ),
+ WidgetDescription(
+ "Change the color of the inactive portions of the underline, e.g. inactive_bar_style='blue'",
+ Tabs(
+ tabs,
+ active_tab="four",
+ inactive_bar_style="blue",
+ ),
+ ),
+ WidgetDescription(
+ "Change the color of the active portion of the underline, e.g. active_bar_style='red'",
+ Tabs(
+ tabs,
+ active_tab="five",
+ active_bar_style="red",
+ inactive_text_opacity=1,
+ ),
+ ),
+ WidgetDescription(
+ "Change the styling of active and inactive labels (active_tab_style, inactive_tab_style)",
+ Tabs(
+ tabs,
+ active_tab="one",
+ active_bar_style="#DA812D",
+ active_tab_style="bold #FFCB4D on #021720",
+ inactive_tab_style="italic #887AEF on #021720",
+ inactive_bar_style="#695CC8",
+ inactive_text_opacity=0.6,
+ ),
+ ),
+ WidgetDescription(
+ "Change the animation duration and function (animation_duration=1, animation_function='out_quad')",
+ Tabs(
+ tabs,
+ active_tab="one",
+ active_bar_style="#887AEF",
+ inactive_text_opacity=0.2,
+ animation_duration=1,
+ animation_function="out_quad",
+ ),
+ ),
+ WidgetDescription(
+ "Choose which tab to start on by name, e.g. active_tab='three'",
+ Tabs(
+ tabs,
+ active_tab="three",
+ active_bar_style="#FFCB4D",
+ tab_padding=3,
+ ),
+ ),
+ ]
+
+ def on_load(self):
+ """Bind keys here."""
+ self.bind("tab", "toggle_class('#sidebar', '-active')")
+ self.bind("a", "toggle_class('#header', '-visible')")
+ self.bind("c", "toggle_class('#content', '-content-visible')")
+ self.bind("d", "toggle_class('#footer', 'dim')")
+
+ def on_key(self, event: events.Key) -> None:
+ for example in self.examples:
+ tab = self.keys_to_tabs.get(event.key)
+ if tab:
+ example.widget._active_tab_name = tab.name
+
+ def on_mount(self):
+ """Build layout here."""
+ self.mount(
+ info=Info(
+ "\n"
+ "• The examples below show customisation options for the [bold #1493FF]Tabs[/] widget.\n"
+ "• Press keys 1-6 on your keyboard to switch tabs, or click on a tab.",
+ )
+ )
+ for example in self.examples:
+ info = Info(example.description)
+ self.mount(Hr())
+ self.mount(info)
+ self.mount(example.widget)
+
+
+BasicApp.run(css_file="tabs.scss", watch_css=True, log="textual.log")
diff --git a/sandbox/tabs.scss b/sandbox/tabs.scss
new file mode 100644
index 0000000000..70f893ee49
--- /dev/null
+++ b/sandbox/tabs.scss
@@ -0,0 +1,9 @@
+$background: #021720;
+
+App > View {
+ text: on $background;
+}
+
+#info {
+ height: 4;
+}
diff --git a/src/textual/_event_broker.py b/src/textual/_event_broker.py
index a5bd074aca..1b63a6cf5e 100644
--- a/src/textual/_event_broker.py
+++ b/src/textual/_event_broker.py
@@ -24,5 +24,4 @@ def extract_handler_actions(event_name: str, meta: dict[str, Any]) -> HandlerArg
if __name__ == "__main__":
-
print(extract_handler_actions("mouse.down", {"@mouse.down.hot": "app.bell()"}))
diff --git a/src/textual/_layout_resolve.py b/src/textual/_layout_resolve.py
index e8530c3ada..1a929622aa 100644
--- a/src/textual/_layout_resolve.py
+++ b/src/textual/_layout_resolve.py
@@ -2,7 +2,7 @@
import sys
from fractions import Fraction
-from typing import cast, List, Optional, Sequence
+from typing import cast, List, Sequence, NamedTuple
if sys.version_info >= (3, 8):
from typing import Protocol
@@ -10,15 +10,21 @@
from typing_extensions import Protocol # pragma: no cover
-class Edge(Protocol):
+class EdgeProtocol(Protocol):
"""Any object that defines an edge (such as Layout)."""
- size: Optional[int] = None
- fraction: int = 1
+ size: int | None
+ min_size: int
+ fraction: int | None
+
+
+class Edge(NamedTuple):
+ size: int | None = None
min_size: int = 1
+ fraction: int | None = 1
-def layout_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
+def layout_resolve(total: int, edges: Sequence[EdgeProtocol]) -> List[int]:
"""Divide total space to satisfy size, fraction, and min_size, constraints.
The returned list of integers should add up to total in most cases, unless it is
@@ -29,7 +35,7 @@ def layout_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
Args:
total (int): Total number of characters.
- edges (List[Edge]): Edges within total space.
+ edges (List[EdgeProtocol]): Edges within total space.
Returns:
List[int]: Number of characters for each edge.
diff --git a/src/textual/renderables/opacity.py b/src/textual/renderables/opacity.py
index 238904a29b..f1d73d335a 100644
--- a/src/textual/renderables/opacity.py
+++ b/src/textual/renderables/opacity.py
@@ -33,11 +33,12 @@ def __rich_console__(
fg = style.color
bg = style.bgcolor
if fg and fg.triplet and bg and bg.triplet:
+ color_style = _get_blended_style_cached(
+ fg_color=fg, bg_color=bg, opacity=opacity
+ )
yield Segment(
segment.text,
- _get_blended_style_cached(
- fg_color=fg, bg_color=bg, opacity=opacity
- ),
+ style + color_style,
segment.control,
)
else:
diff --git a/src/textual/renderables/underline_bar.py b/src/textual/renderables/underline_bar.py
index 59c9e6bb40..c0d508df67 100644
--- a/src/textual/renderables/underline_bar.py
+++ b/src/textual/renderables/underline_bar.py
@@ -1,8 +1,8 @@
from __future__ import annotations
from rich.console import ConsoleOptions, Console, RenderResult
-from rich.segment import Segment
from rich.style import StyleType
+from rich.text import Text
class UnderlineBar:
@@ -20,11 +20,13 @@ def __init__(
highlight_range: tuple[float, float] = (0, 0),
highlight_style: StyleType = "magenta",
background_style: StyleType = "grey37",
+ clickable_ranges: dict[str, tuple[int, int]] | None = None,
width: int | None = None,
) -> None:
self.highlight_range = highlight_range
self.highlight_style = highlight_style
self.background_style = background_style
+ self.clickable_ranges = clickable_ranges or {}
self.width = width
def __rich_console__(
@@ -43,8 +45,11 @@ def __rich_console__(
start = max(start, 0)
end = min(end, width)
+ output_bar = Text("", end="")
+
if start == end == 0 or end < 0 or start > end:
- yield Segment(bar * width, style=background_style)
+ output_bar.append(Text(bar * width, style=background_style, end=""))
+ yield output_bar
return
# Round start and end to nearest half
@@ -56,23 +61,39 @@ def __rich_console__(
half_end = end - int(end) > 0
# Initial non-highlighted portion of bar
- yield Segment(bar * (int(start - 0.5)), style=background_style)
+ output_bar.append(
+ Text(bar * (int(start - 0.5)), style=background_style, end="")
+ )
if not half_start and start > 0:
- yield Segment(half_bar_right, style=background_style)
+ output_bar.append(Text(half_bar_right, style=background_style, end=""))
# The highlighted portion
bar_width = int(end) - int(start)
if half_start:
- yield Segment(half_bar_left + bar * (bar_width - 1), style=highlight_style)
+ output_bar.append(
+ Text(
+ half_bar_left + bar * (bar_width - 1), style=highlight_style, end=""
+ )
+ )
else:
- yield Segment(bar * bar_width, style=highlight_style)
+ output_bar.append(Text(bar * bar_width, style=highlight_style, end=""))
if half_end:
- yield Segment(half_bar_right, style=highlight_style)
+ output_bar.append(Text(half_bar_right, style=highlight_style, end=""))
# The non-highlighted tail
if not half_end and end - width != 0:
- yield Segment(half_bar_left, style=background_style)
- yield Segment(bar * (int(width) - int(end) - 1), style=background_style)
+ output_bar.append(Text(half_bar_left, style=background_style, end=""))
+ output_bar.append(
+ Text(bar * (int(width) - int(end) - 1), style=background_style, end="")
+ )
+
+ # Fire actions when certain ranges are clicked (e.g. for tabs)
+ for range_name, (start, end) in self.clickable_ranges.items():
+ output_bar.apply_meta(
+ {"@click": f"range_clicked('{range_name}')"}, start, end
+ )
+
+ yield output_bar
if __name__ == "__main__":
@@ -102,17 +123,13 @@ def frange(start, end, step):
for range in ranges:
color = random.choice(list(ANSI_COLOR_NAMES.keys()))
console.print(
- UnderlineBar(
- range,
- highlight_style=color,
- width=20,
- ),
+ UnderlineBar(range, highlight_style=color, width=20),
f" {range}",
)
from rich.live import Live
- bar = UnderlineBar(width=80, highlight_range=(0, 4.5))
+ bar = UnderlineBar(highlight_range=(0, 4.5), width=80)
with Live(bar, refresh_per_second=60) as live:
while True:
bar.highlight_range = (
diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py
index 643dc9e829..a475e04448 100644
--- a/src/textual/widgets/_header.py
+++ b/src/textual/widgets/_header.py
@@ -3,16 +3,15 @@
from datetime import datetime
from logging import getLogger
-from rich.console import Console, ConsoleOptions, RenderableType
+from rich.console import RenderableType
from rich.panel import Panel
-from rich.repr import rich_repr, Result
+from rich.repr import Result
from rich.style import StyleType
from rich.table import Table
-from rich.text import TextType
from .. import events
-from ..widget import Widget
from ..reactive import watch, Reactive
+from ..widget import Widget
log = getLogger("rich")
diff --git a/src/textual/widgets/tabs.py b/src/textual/widgets/tabs.py
new file mode 100644
index 0000000000..4346816f6a
--- /dev/null
+++ b/src/textual/widgets/tabs.py
@@ -0,0 +1,344 @@
+from __future__ import annotations
+
+import string
+from dataclasses import dataclass
+from typing import Iterable
+
+from rich.cells import cell_len
+from rich.console import Console, ConsoleOptions, RenderableType, RenderResult
+from rich.segment import Segment
+from rich.style import StyleType, Style
+from rich.text import Text
+
+from textual import events
+from textual._layout_resolve import layout_resolve, Edge
+from textual.keys import Keys
+from textual.reactive import Reactive
+from textual.renderables.opacity import Opacity
+from textual.renderables.underline_bar import UnderlineBar
+from textual.widget import Widget
+
+__all__ = ["Tab", "Tabs"]
+
+
+@dataclass
+class Tab:
+ """Data container representing a single tab.
+
+ Attributes:
+ label (str): The user-facing label that will appear inside the tab.
+ name (str, optional): A unique string key that will identify the tab. If None, it will default to the label.
+ If the name is not unique within a single list of tabs, only the final Tab will be displayed.
+ """
+
+ label: str
+ name: str | None = None
+
+ def __post_init__(self):
+ if self.name is None:
+ self.name = self.label
+
+ def __str__(self):
+ return self.label
+
+
+class TabsRenderable:
+ """Renderable for the Tabs widget."""
+
+ def __init__(
+ self,
+ tabs: Iterable[Tab],
+ *,
+ active_tab_name: str,
+ active_tab_style: StyleType,
+ active_bar_style: StyleType,
+ inactive_tab_style: StyleType,
+ inactive_bar_style: StyleType,
+ inactive_text_opacity: float,
+ tab_padding: int | None,
+ bar_offset: float,
+ width: int | None = None,
+ ):
+ self.tabs = {tab.name: tab for tab in tabs}
+
+ try:
+ self.active_tab_name = active_tab_name or next(iter(self.tabs))
+ except StopIteration:
+ self.active_tab_name = None
+
+ self.active_tab_style = active_tab_style
+ self.active_bar_style = active_bar_style
+
+ self.inactive_tab_style = inactive_tab_style
+ self.inactive_bar_style = inactive_bar_style
+
+ self.bar_offset = bar_offset
+ self.width = width
+ self.tab_padding = tab_padding
+ self.inactive_text_opacity = inactive_text_opacity
+
+ self._label_range_cache: dict[str, tuple[int, int]] = {}
+ self._selection_range_cache: dict[str, tuple[int, int]] = {}
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ if self.tabs:
+ yield from self.get_tab_labels(console, options)
+ yield Segment.line()
+ yield from self.get_underline_bar(console)
+
+ def get_tab_labels(self, console: Console, options: ConsoleOptions) -> RenderResult:
+ """Yields the spaced-out labels that appear above the line for the Tabs widget"""
+ width = self.width or options.max_width
+ tab_values = self.tabs.values()
+
+ space = Edge(size=self.tab_padding or None, min_size=1, fraction=1)
+ edges = []
+ for tab in tab_values:
+ tab = Edge(size=cell_len(tab.label), min_size=1, fraction=None)
+ edges.extend([space, tab, space])
+
+ spacing = layout_resolve(width, edges=edges)
+
+ active_tab_style = console.get_style(self.active_tab_style)
+ inactive_tab_style = console.get_style(self.inactive_tab_style)
+
+ label_cell_cursor = 0
+ for tab_index, tab in enumerate(tab_values):
+ tab_edge_index = tab_index * 3 + 1
+
+ len_label_text = spacing[tab_edge_index]
+ lpad = spacing[tab_edge_index - 1]
+ rpad = spacing[tab_edge_index + 1]
+
+ label_left_padding = Text(" " * lpad, end="")
+ label_right_padding = Text(" " * rpad, end="")
+
+ padded_label = f"{label_left_padding}{tab.label}{label_right_padding}"
+ if tab.name == self.active_tab_name:
+ yield Text(padded_label, end="", style=active_tab_style)
+ else:
+ tab_content = Text(
+ padded_label,
+ end="",
+ style=inactive_tab_style
+ + Style.from_meta({"@click": f"range_clicked('{tab.name}')"}),
+ )
+ dimmed_tab_content = Opacity(
+ tab_content, opacity=self.inactive_text_opacity
+ )
+ segments = console.render(dimmed_tab_content)
+ yield from segments
+
+ # Cache the position of the label text within this tab
+ label_cell_cursor += lpad
+ self._label_range_cache[tab.name] = (
+ label_cell_cursor,
+ label_cell_cursor + len_label_text,
+ )
+ label_cell_cursor += len_label_text + rpad
+
+ # Cache the position of the whole tab, i.e. the range that can be clicked
+ self._selection_range_cache[tab.name] = (
+ label_cell_cursor - lpad,
+ label_cell_cursor + len_label_text + rpad,
+ )
+
+ def get_underline_bar(self, console: Console) -> RenderResult:
+ """Yields the bar that appears below the tab labels in the Tabs widget"""
+ if self.tabs:
+ ranges = self._label_range_cache
+ tab_index = int(self.bar_offset)
+ next_tab_index = (tab_index + 1) % len(ranges)
+ range_values = list(ranges.values())
+ tab1_start, tab1_end = range_values[tab_index]
+ tab2_start, tab2_end = range_values[next_tab_index]
+
+ bar_start = tab1_start + (tab2_start - tab1_start) * (
+ self.bar_offset - tab_index
+ )
+ bar_end = tab1_end + (tab2_end - tab1_end) * (self.bar_offset - tab_index)
+ else:
+ bar_start = 0
+ bar_end = 0
+ underline = UnderlineBar(
+ highlight_range=(bar_start, bar_end),
+ highlight_style=self.active_bar_style,
+ background_style=self.inactive_bar_style,
+ clickable_ranges=self._selection_range_cache,
+ )
+ yield from console.render(underline)
+
+
+class Tabs(Widget):
+ """Widget which displays a set of horizontal tabs.
+
+ Args:
+ tabs (list[Tab]): A list of Tab objects defining the tabs which should be rendered.
+ active_tab (str, optional): The name of the tab that should be active on first render.
+ active_tab_style (StyleType): Style to apply to the label of the active tab.
+ active_bar_style (StyleType): Style to apply to the underline of the active tab.
+ inactive_tab_style (StyleType): Style to apply to the label of inactive tabs.
+ inactive_bar_style (StyleType): Style to apply to the underline of inactive tabs.
+ inactive_text_opacity (float): Opacity of the text labels of inactive tabs.
+ animation_duration (float): The duration of the tab change animation, in seconds.
+ animation_function (str): The easing function to use for the tab change animation.
+ tab_padding (int, optional): The padding at the side of each tab. If None, tabs will
+ automatically be padded such that they fit the available horizontal space.
+ search_by_first_character (bool): If True, entering a character on your keyboard
+ will activate the next tab (in left-to-right order) with a label starting with
+ that character.
+ """
+
+ DEFAULT_STYLES = "height: 2;"
+
+ _active_tab_name: Reactive[str | None] = Reactive("")
+ _bar_offset: Reactive[float] = Reactive(0.0)
+
+ def __init__(
+ self,
+ tabs: list[Tab],
+ active_tab: str | None = None,
+ active_tab_style: StyleType = "#f0f0f0 on #021720",
+ active_bar_style: StyleType = "#1BB152",
+ inactive_tab_style: StyleType = "#f0f0f0 on #021720",
+ inactive_bar_style: StyleType = "#455058",
+ inactive_text_opacity: float = 0.5,
+ animation_duration: float = 0.3,
+ animation_function: str = "out_cubic",
+ tab_padding: int | None = None,
+ search_by_first_character: bool = True,
+ ) -> None:
+ super().__init__()
+ self.tabs = tabs
+
+ self._bar_offset = float(self.find_tab_by_name(active_tab) or 0)
+ self._active_tab_name = active_tab or next(iter(self.tabs), None)
+
+ self.active_tab_style = active_tab_style
+ self.active_bar_style = active_bar_style
+
+ self.inactive_bar_style = inactive_bar_style
+ self.inactive_tab_style = inactive_tab_style
+ self.inactive_text_opacity = inactive_text_opacity
+
+ self.animation_function = animation_function
+ self.animation_duration = animation_duration
+
+ self.tab_padding = tab_padding
+
+ self.search_by_first_character = search_by_first_character
+
+ def on_key(self, event: events.Key) -> None:
+ """Handles key press events when this widget is in focus.
+ Pressing "escape" removes focus from this widget. Use the left and
+ right arrow keys to cycle through tabs. Use number keys to jump to tabs
+ based in their number ("1" jumps to the leftmost tab). Type a character
+ to cycle through tabs with labels beginning with that character.
+
+ Args:
+ event (events.Key): The Key event being handled
+ """
+ if not self.tabs:
+ event.prevent_default()
+ return
+
+ if event.key == Keys.Escape:
+ self.app.set_focus(None)
+ elif event.key == Keys.Right:
+ self.activate_next_tab()
+ elif event.key == Keys.Left:
+ self.activate_previous_tab()
+ elif event.key in string.digits:
+ self.activate_tab_by_number(int(event.key))
+ elif self.search_by_first_character:
+ self.activate_tab_by_first_char(event.key)
+
+ event.prevent_default()
+
+ def activate_next_tab(self) -> None:
+ """Activate the tab to the right of the currently active tab"""
+ current_tab_index = self.find_tab_by_name(self._active_tab_name)
+ next_tab_index = (current_tab_index + 1) % len(self.tabs)
+ next_tab_name = self.tabs[next_tab_index].name
+ self._active_tab_name = next_tab_name
+
+ def activate_previous_tab(self) -> None:
+ """Activate the tab to the left of the currently active tab"""
+ current_tab_index = self.find_tab_by_name(self._active_tab_name)
+ previous_tab_index = current_tab_index - 1
+ previous_tab_name = self.tabs[previous_tab_index].name
+ self._active_tab_name = previous_tab_name
+
+ def activate_tab_by_first_char(self, char: str) -> None:
+ """Activate the next tab that begins with the character
+
+ Args:
+ char (str): The character to search for
+ """
+
+ def find_next_matching_tab(
+ char: str, start: int | None, end: int | None
+ ) -> Tab | None:
+ for tab in self.tabs[start:end]:
+ if tab.label.lower().startswith(char.lower()):
+ return tab
+
+ current_tab_index = self.find_tab_by_name(self._active_tab_name)
+ next_tab_index = (current_tab_index + 1) % len(self.tabs)
+
+ next_matching_tab = find_next_matching_tab(char, next_tab_index, None)
+ if not next_matching_tab:
+ next_matching_tab = find_next_matching_tab(char, None, current_tab_index)
+
+ if next_matching_tab:
+ self._active_tab_name = next_matching_tab.name
+
+ def activate_tab_by_number(self, tab_number: int) -> None:
+ """Activate a tab using the tab number.
+
+ Args:
+ tab_number (int): The number of the tab.
+ The leftmost tab is number 1, the next is 2, and so on. 0 represents the 10th tab.
+ """
+ if tab_number > len(self.tabs):
+ return
+ if tab_number == 0 and len(self.tabs) >= 10:
+ tab_number = 10
+ self._active_tab_name = self.tabs[tab_number - 1].name
+
+ def action_range_clicked(self, target_tab_name: str) -> None:
+ """Handles 'range_clicked' actions which are fired when tabs are clicked"""
+ self._active_tab_name = target_tab_name
+
+ def watch__active_tab_name(self, tab_name: str) -> None:
+ """Animates the underline bar position when the active tab changes"""
+ target_tab_index = self.find_tab_by_name(tab_name)
+ self.animate(
+ "_bar_offset",
+ float(target_tab_index),
+ easing=self.animation_function,
+ duration=self.animation_duration,
+ )
+
+ def find_tab_by_name(self, tab_name: str) -> int:
+ """Return the index of the first tab with a certain name
+
+ Args:
+ tab_name (str): The name to search for.
+ """
+ return next((i for i, tab in enumerate(self.tabs) if tab.name == tab_name), 0)
+
+ def render(self) -> RenderableType:
+ return TabsRenderable(
+ self.tabs,
+ tab_padding=self.tab_padding,
+ active_tab_name=self._active_tab_name,
+ active_tab_style=self.active_tab_style,
+ active_bar_style=self.active_bar_style,
+ inactive_tab_style=self.inactive_tab_style,
+ inactive_bar_style=self.inactive_bar_style,
+ bar_offset=self._bar_offset,
+ inactive_text_opacity=self.inactive_text_opacity,
+ )
| diff --git a/tests/renderables/test_underline_bar.py b/tests/renderables/test_underline_bar.py
index 5c5e4de9ce..549b331e3b 100644
--- a/tests/renderables/test_underline_bar.py
+++ b/tests/renderables/test_underline_bar.py
@@ -1,3 +1,9 @@
+from unittest.mock import create_autospec
+
+from rich.console import Console
+from rich.console import ConsoleOptions
+from rich.text import Text
+
from tests.utilities.render import render
from textual.renderables.underline_bar import UnderlineBar
@@ -111,12 +117,7 @@ def test_highlight_full_range_out_of_bounds_start():
def test_custom_styles():
- bar = UnderlineBar(
- highlight_range=(2, 4),
- highlight_style="red",
- background_style="green",
- width=6
- )
+ bar = UnderlineBar(highlight_range=(2, 4), highlight_style="red", background_style="green", width=6)
assert render(bar) == (
f"{GREEN}━{STOP}"
f"{GREEN}╸{STOP}"
@@ -124,3 +125,19 @@ def test_custom_styles():
f"{GREEN}╺{STOP}"
f"{GREEN}━{STOP}"
)
+
+
+def test_clickable_ranges():
+ bar = UnderlineBar(highlight_range=(0, 1), width=6, clickable_ranges={"foo": (0, 2), "bar": (4, 5)})
+
+ console = create_autospec(Console)
+ options = create_autospec(ConsoleOptions)
+ text: Text = list(bar.__rich_console__(console, options))[0]
+
+ start, end, style = text.spans[-2]
+ assert (start, end) == (0, 2)
+ assert style.meta == {'@click': "range_clicked('foo')"}
+
+ start, end, style = text.spans[-1]
+ assert (start, end) == (4, 5)
+ assert style.meta == {'@click': "range_clicked('bar')"}
| diff --git a/docs/reference/tabs.md b/docs/reference/tabs.md
new file mode 100644
index 0000000000..2f1151c7a6
--- /dev/null
+++ b/docs/reference/tabs.md
@@ -0,0 +1,1 @@
+::: textual.widgets.tabs.Tabs
diff --git a/examples/dev_sandbox.scss b/examples/dev_sandbox.scss
deleted file mode 100644
index 987c0f09ec..0000000000
--- a/examples/dev_sandbox.scss
+++ /dev/null
@@ -1,63 +0,0 @@
-/* CSS file for dev_sandbox.py */
-
-$text: #f0f0f0;
-$primary: #021720;
-$secondary:#95d52a;
-$background: #262626;
-
-$primary-style: $text on $background;
-$animation-speed: 500ms;
-$animation: offset $animation-speed in_out_cubic;
-
-App > View {
- docks: side=left/1;
- text: on $background;
-}
-
-Widget:hover {
- outline: heavy;
- text: bold !important;
-}
-
-#sidebar {
- text: $primary-style;
- dock: side;
- width: 30;
- offset-x: -100%;
- transition: $animation;
- border-right: outer $secondary;
-}
-
-#sidebar.-active {
- offset-x: 0;
-}
-
-#header {
- text: $text on $primary;
- height: 3;
- border-bottom: hkey $secondary;
-}
-
-#header.-visible {
- visibility: hidden;
-}
-
-#content {
- text: $text on $background;
- offset-y: -3;
-}
-
-#content.-content-visible {
- visibility: hidden;
-}
-
-#footer {
- opacity: 1;
- text: $text on $primary;
- height: 3;
- border-top: hkey $secondary;
-}
-
-#footer.dim {
- opacity: 0.5;
-}
diff --git a/sandbox/tabs.scss b/sandbox/tabs.scss
new file mode 100644
index 0000000000..70f893ee49
--- /dev/null
+++ b/sandbox/tabs.scss
@@ -0,0 +1,9 @@
+$background: #021720;
+
+App > View {
+ text: on $background;
+}
+
+#info {
+ height: 4;
+}
| [
{
"components": [
{
"doc": "",
"lines": [
13,
15
],
"name": "Hr",
"signature": "class Hr(Widget):",
"type": "class"
},
{
"doc": "",
"lines": [
14,
15
],
"name": "Hr.render",
... | [
"tests/renderables/test_underline_bar.py::test_clickable_ranges"
] | [
"tests/renderables/test_underline_bar.py::test_no_highlight",
"tests/renderables/test_underline_bar.py::test_highlight_from_zero",
"tests/renderables/test_underline_bar.py::test_highlight_from_zero_point_five",
"tests/renderables/test_underline_bar.py::test_highlight_middle",
"tests/renderables/test_underli... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Tabs
## Example
The video below shows a few different features:
* Clicking on the underline or anywhere on the bar will activate the nearest tab.
* Using "left" and "right" arrows on the keyboard will cycle through tabs, wrapping around
* Pressing a number key will cause the tab with that number to activate
* Pressing a letter will "search" through tab labels, so when I press "M" key, it jumps to "March" to "May". It is cycling through tabs that begin with the letter "M".
https://user-images.githubusercontent.com/5740731/154511136-d26e1ff2-ec16-4047-b05f-4ecad4cc90b9.mov
## Demo
See the demo in the `sandbox` dir: `python -m tabs` - or watch [the video](https://www.youtube.com/watch?v=59CVt9gVcvo).
Screenshot of the demo:

When there are no tabs, only the "inactive" underline renders:

## Outstanding tasks
- [x] Increase tab click target to include padding
- [x] Handle case where user supplies no tabs - just render inactive underline
- [x] Increase tab click target to include underline area
- [x] Widget-level keyboard control
- [x] Minor rounding error fix
- [x] Docstrings
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sandbox/tabs.py]
(definition of Hr:)
class Hr(Widget):
(definition of Hr.render:)
def render(self) -> RenderableType:
(definition of Info:)
class Info(Widget):
(definition of Info.__init__:)
def __init__(self, text: str) -> None:
(definition of Info.render:)
def render(self) -> RenderableType:
(definition of WidgetDescription:)
class WidgetDescription:
(definition of BasicApp:)
class BasicApp(App):
"""Sandbox application used for testing/development by Textual developers"""
(definition of BasicApp.__init__:)
def __init__(self, *args, **kwargs):
(definition of BasicApp.on_load:)
def on_load(self):
"""Bind keys here."""
(definition of BasicApp.on_key:)
def on_key(self, event: events.Key) -> None:
(definition of BasicApp.on_mount:)
def on_mount(self):
"""Build layout here."""
[end of new definitions in sandbox/tabs.py]
[start of new definitions in src/textual/_layout_resolve.py]
(definition of EdgeProtocol:)
class EdgeProtocol(Protocol):
"""Any object that defines an edge (such as Layout)."""
[end of new definitions in src/textual/_layout_resolve.py]
[start of new definitions in src/textual/widgets/tabs.py]
(definition of Tab:)
class Tab:
"""Data container representing a single tab.
Attributes:
label (str): The user-facing label that will appear inside the tab.
name (str, optional): A unique string key that will identify the tab. If None, it will default to the label.
If the name is not unique within a single list of tabs, only the final Tab will be displayed."""
(definition of Tab.__post_init__:)
def __post_init__(self):
(definition of Tab.__str__:)
def __str__(self):
(definition of TabsRenderable:)
class TabsRenderable:
"""Renderable for the Tabs widget."""
(definition of TabsRenderable.__init__:)
def __init__( self, tabs: Iterable[Tab], *, active_tab_name: str, active_tab_style: StyleType, active_bar_style: StyleType, inactive_tab_style: StyleType, inactive_bar_style: StyleType, inactive_text_opacity: float, tab_padding: int | None, bar_offset: float, width: int | None = None, ):
(definition of TabsRenderable.__rich_console__:)
def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult:
(definition of TabsRenderable.get_tab_labels:)
def get_tab_labels(self, console: Console, options: ConsoleOptions) -> RenderResult:
"""Yields the spaced-out labels that appear above the line for the Tabs widget"""
(definition of TabsRenderable.get_underline_bar:)
def get_underline_bar(self, console: Console) -> RenderResult:
"""Yields the bar that appears below the tab labels in the Tabs widget"""
(definition of Tabs:)
class Tabs(Widget):
"""Widget which displays a set of horizontal tabs.
Args:
tabs (list[Tab]): A list of Tab objects defining the tabs which should be rendered.
active_tab (str, optional): The name of the tab that should be active on first render.
active_tab_style (StyleType): Style to apply to the label of the active tab.
active_bar_style (StyleType): Style to apply to the underline of the active tab.
inactive_tab_style (StyleType): Style to apply to the label of inactive tabs.
inactive_bar_style (StyleType): Style to apply to the underline of inactive tabs.
inactive_text_opacity (float): Opacity of the text labels of inactive tabs.
animation_duration (float): The duration of the tab change animation, in seconds.
animation_function (str): The easing function to use for the tab change animation.
tab_padding (int, optional): The padding at the side of each tab. If None, tabs will
automatically be padded such that they fit the available horizontal space.
search_by_first_character (bool): If True, entering a character on your keyboard
will activate the next tab (in left-to-right order) with a label starting with
that character."""
(definition of Tabs.__init__:)
def __init__( self, tabs: list[Tab], active_tab: str | None = None, active_tab_style: StyleType = "
(definition of Tabs.on_key:)
def on_key(self, event: events.Key) -> None:
"""Handles key press events when this widget is in focus.
Pressing "escape" removes focus from this widget. Use the left and
right arrow keys to cycle through tabs. Use number keys to jump to tabs
based in their number ("1" jumps to the leftmost tab). Type a character
to cycle through tabs with labels beginning with that character.
Args:
event (events.Key): The Key event being handled"""
(definition of Tabs.activate_next_tab:)
def activate_next_tab(self) -> None:
"""Activate the tab to the right of the currently active tab"""
(definition of Tabs.activate_previous_tab:)
def activate_previous_tab(self) -> None:
"""Activate the tab to the left of the currently active tab"""
(definition of Tabs.activate_tab_by_first_char:)
def activate_tab_by_first_char(self, char: str) -> None:
"""Activate the next tab that begins with the character
Args:
char (str): The character to search for"""
(definition of Tabs.activate_tab_by_first_char.find_next_matching_tab:)
def find_next_matching_tab( char: str, start: int | None, end: int | None ) -> Tab | None:
(definition of Tabs.activate_tab_by_number:)
def activate_tab_by_number(self, tab_number: int) -> None:
"""Activate a tab using the tab number.
Args:
tab_number (int): The number of the tab.
The leftmost tab is number 1, the next is 2, and so on. 0 represents the 10th tab."""
(definition of Tabs.action_range_clicked:)
def action_range_clicked(self, target_tab_name: str) -> None:
"""Handles 'range_clicked' actions which are fired when tabs are clicked"""
(definition of Tabs.watch__active_tab_name:)
def watch__active_tab_name(self, tab_name: str) -> None:
"""Animates the underline bar position when the active tab changes"""
(definition of Tabs.find_tab_by_name:)
def find_tab_by_name(self, tab_name: str) -> int:
"""Return the index of the first tab with a certain name
Args:
tab_name (str): The name to search for."""
(definition of Tabs.render:)
def render(self) -> RenderableType:
[end of new definitions in src/textual/widgets/tabs.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 86e93536b991014e0ea4bf993068202b446bb698 | |
scikit-learn__scikit-learn-22518 | 22,518 | scikit-learn/scikit-learn | 1.2 | 84f8409dc5c485729649c5332e66fd5602549b50 | 2022-02-17T10:01:39Z | diff --git a/doc/modules/classes.rst b/doc/modules/classes.rst
index bc58d50ce8f81..c6838556d50ad 100644
--- a/doc/modules/classes.rst
+++ b/doc/modules/classes.rst
@@ -954,6 +954,7 @@ details.
metrics.average_precision_score
metrics.balanced_accuracy_score
metrics.brier_score_loss
+ metrics.class_likelihood_ratios
metrics.classification_report
metrics.cohen_kappa_score
metrics.confusion_matrix
diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index d8fe7d87eec7a..34412576f80aa 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -311,6 +311,7 @@ Some of these are restricted to the binary classification case:
precision_recall_curve
roc_curve
+ class_likelihood_ratios
det_curve
@@ -876,6 +877,8 @@ In this context, we can define the notions of precision, recall and F-measure:
F_\beta = (1 + \beta^2) \frac{\text{precision} \times \text{recall}}{\beta^2 \text{precision} + \text{recall}}.
+Sometimes recall is also called ''sensitivity''.
+
Here are some small examples in binary classification::
>>> from sklearn import metrics
@@ -1756,6 +1759,133 @@ the same does a lower Brier score loss always mean better calibration"
and probability estimation." <https://drops.dagstuhl.de/opus/volltexte/2008/1382/>`_
Dagstuhl Seminar Proceedings. Schloss Dagstuhl-Leibniz-Zentrum fr Informatik (2008).
+.. _class_likelihood_ratios:
+
+Class likelihood ratios
+-----------------------
+
+The :func:`class_likelihood_ratios` function computes the `positive and negative
+likelihood ratios
+<https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_
+:math:`LR_\pm` for binary classes, which can be interpreted as the ratio of
+post-test to pre-test odds as explained below. As a consequence, this metric is
+invariant w.r.t. the class prevalence (the number of samples in the positive
+class divided by the total number of samples) and **can be extrapolated between
+populations regardless of any possible class imbalance.**
+
+The :math:`LR_\pm` metrics are therefore very useful in settings where the data
+available to learn and evaluate a classifier is a study population with nearly
+balanced classes, such as a case-control study, while the target application,
+i.e. the general population, has very low prevalence.
+
+The positive likelihood ratio :math:`LR_+` is the probability of a classifier to
+correctly predict that a sample belongs to the positive class divided by the
+probability of predicting the positive class for a sample belonging to the
+negative class:
+
+.. math::
+
+ LR_+ = \frac{\text{PR}(P+|T+)}{\text{PR}(P+|T-)}.
+
+The notation here refers to predicted (:math:`P`) or true (:math:`T`) label and
+the sign :math:`+` and :math:`-` refer to the positive and negative class,
+respectively, e.g. :math:`P+` stands for "predicted positive".
+
+Analogously, the negative likelihood ratio :math:`LR_-` is the probability of a
+sample of the positive class being classified as belonging to the negative class
+divided by the probability of a sample of the negative class being correctly
+classified:
+
+.. math::
+
+ LR_- = \frac{\text{PR}(P-|T+)}{\text{PR}(P-|T-)}.
+
+For classifiers above chance :math:`LR_+` above 1 **higher is better**, while
+:math:`LR_-` ranges from 0 to 1 and **lower is better**.
+Values of :math:`LR_\pm\approx 1` correspond to chance level.
+
+Notice that probabilities differ from counts, for instance
+:math:`\operatorname{PR}(P+|T+)` is not equal to the number of true positive
+counts ``tp`` (see `the wikipedia page
+<https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_ for
+the actual formulas).
+
+**Interpretation across varying prevalence:**
+
+Both class likelihood ratios are interpretable in terms of an odds ratio
+(pre-test and post-tests):
+
+.. math::
+
+ \text{post-test odds} = \text{Likelihood ratio} \times \text{pre-test odds}.
+
+Odds are in general related to probabilities via
+
+.. math::
+
+ \text{odds} = \frac{\text{probability}}{1 - \text{probability}},
+
+or equivalently
+
+.. math::
+
+ \text{probability} = \frac{\text{odds}}{1 + \text{odds}}.
+
+On a given population, the pre-test probability is given by the prevalence. By
+converting odds to probabilities, the likelihood ratios can be translated into a
+probability of truly belonging to either class before and after a classifier
+prediction:
+
+.. math::
+
+ \text{post-test odds} = \text{Likelihood ratio} \times
+ \frac{\text{pre-test probability}}{1 - \text{pre-test probability}},
+
+.. math::
+
+ \text{post-test probability} = \frac{\text{post-test odds}}{1 + \text{post-test odds}}.
+
+**Mathematical divergences:**
+
+The positive likelihood ratio is undefined when :math:`fp = 0`, which can be
+interpreted as the classifier perfectly identifying positive cases. If :math:`fp
+= 0` and additionally :math:`tp = 0`, this leads to a zero/zero division. This
+happens, for instance, when using a `DummyClassifier` that always predicts the
+negative class and therefore the interpretation as a perfect classifier is lost.
+
+The negative likelihood ratio is undefined when :math:`tn = 0`. Such divergence
+is invalid, as :math:`LR_- > 1` would indicate an increase in the odds of a
+sample belonging to the positive class after being classified as negative, as if
+the act of classifying caused the positive condition. This includes the case of
+a `DummyClassifier` that always predicts the positive class (i.e. when
+:math:`tn=fn=0`).
+
+Both class likelihood ratios are undefined when :math:`tp=fn=0`, which means
+that no samples of the positive class were present in the testing set. This can
+also happen when cross-validating highly imbalanced data.
+
+In all the previous cases the :func:`class_likelihood_ratios` function raises by
+default an appropriate warning message and returns `nan` to avoid pollution when
+averaging over cross-validation folds.
+
+For a worked-out demonstration of the :func:`class_likelihood_ratios` function,
+see the example below.
+
+.. topic:: Examples:
+
+ * :ref:`sphx_glr_auto_examples_model_selection_plot_likelihood_ratios.py`
+
+.. topic:: References:
+
+ * `Wikipedia entry for Likelihood ratios in diagnostic testing
+ <https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_
+
+ * Brenner, H., & Gefeller, O. (1997).
+ Variation of sensitivity, specificity, likelihood ratios and predictive
+ values with disease prevalence.
+ Statistics in medicine, 16(9), 981-991.
+
+
.. _multilabel_ranking_metrics:
Multilabel ranking metrics
diff --git a/doc/whats_new/v1.2.rst b/doc/whats_new/v1.2.rst
index 561eaa17bca6f..63f9f4e2ead75 100644
--- a/doc/whats_new/v1.2.rst
+++ b/doc/whats_new/v1.2.rst
@@ -71,6 +71,14 @@ Changelog
now conducts validation for `max_features` and `feature_names_in` parameters.
:pr:`23299` by :user:`Long Bao <lorentzbao>`.
+:mod:`sklearn.metrics`
+......................
+
+- |Feature| :func:`class_likelihood_ratios` is added to compute the positive and
+ negative likelihood ratios derived from the confusion matrix
+ of a binary classification problem. :pr:`22518` by
+ :user:`Arturo Amor <ArturoAmorQ>`.
+
:mod:`sklearn.neighbors`
........................
diff --git a/examples/model_selection/plot_likelihood_ratios.py b/examples/model_selection/plot_likelihood_ratios.py
new file mode 100644
index 0000000000000..01a2962f3fe2f
--- /dev/null
+++ b/examples/model_selection/plot_likelihood_ratios.py
@@ -0,0 +1,325 @@
+"""
+=============================================================
+Class Likelihood Ratios to measure classification performance
+=============================================================
+
+This example demonstrates the :func:`~sklearn.metrics.class_likelihood_ratios`
+function, which computes the positive and negative likelihood ratios (`LR+`,
+`LR-`) to assess the predictive power of a binary classifier. As we will see,
+these metrics are independent of the proportion between classes in the test set,
+which makes them very useful when the available data for a study has a different
+class proportion than the target application.
+
+A typical use is a case-control study in medicine, which has nearly balanced
+classes while the general population has large class imbalance. In such
+application, the pre-test probability of an individual having the target
+condition can be chosen to be the prevalence, i.e. the proportion of a
+particular population found to be affected by a medical condition. The post-test
+probabilities represent then the probability that the condition is truly present
+given a positive test result.
+
+In this example we first discuss the link between pre-test and post-test odds
+given by the :ref:`class_likelihood_ratios`. Then we evaluate their behavior in
+some controlled scenarios. In the last section we plot them as a function of the
+prevalence of the positive class.
+
+"""
+
+# Authors: Arturo Amor <david-arturo.amor-quiroz@inria.fr>
+# Olivier Grisel <olivier.grisel@ensta.org>
+# %%
+# Pre-test vs. post-test analysis
+# ===============================
+#
+# Suppose we have a population of subjects with physiological measurements `X`
+# that can hopefully serve as indirect bio-markers of the disease and actual
+# disease indicators `y` (ground truth). Most of the people in the population do
+# not carry the disease but a minority (in this case around 10%) does:
+
+from sklearn.datasets import make_classification
+
+X, y = make_classification(n_samples=10_000, weights=[0.9, 0.1], random_state=0)
+print(f"Percentage of people carrying the disease: {100*y.mean():.2f}%")
+
+# %%
+# A machine learning model is built to diagnose if a person with some given
+# physiological measurements is likely to carry the disease of interest. To
+# evaluate the model, we need to assess its performance on a held-out test set:
+
+from sklearn.model_selection import train_test_split
+
+X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
+
+# %%
+# Then we can fit our diagnosis model and compute the positive likelihood
+# ratio to evaluate the usefulness of this classifier as a disease diagnosis
+# tool:
+
+from sklearn.metrics import class_likelihood_ratios
+from sklearn.linear_model import LogisticRegression
+
+estimator = LogisticRegression().fit(X_train, y_train)
+y_pred = estimator.predict(X_test)
+pos_LR, neg_LR = class_likelihood_ratios(y_test, y_pred)
+print(f"LR+: {pos_LR:.3f}")
+
+# %%
+# Since the positive class likelihood ratio is much larger than 1.0, it means
+# that the machine learning-based diagnosis tool is useful: the post-test odds
+# that the condition is truly present given a positive test result are more than
+# 12 times larger than the pre-test odds.
+#
+# Cross-validation of likelihood ratios
+# =====================================
+#
+# We assess the variability of the measurements for the class likelihood ratios
+# in some particular cases.
+
+import pandas as pd
+
+
+def scoring(estimator, X, y):
+ y_pred = estimator.predict(X)
+ pos_lr, neg_lr = class_likelihood_ratios(y, y_pred, raise_warning=False)
+ return {"positive_likelihood_ratio": pos_lr, "negative_likelihood_ratio": neg_lr}
+
+
+def extract_score(cv_results):
+ lr = pd.DataFrame(
+ {
+ "positive": cv_results["test_positive_likelihood_ratio"],
+ "negative": cv_results["test_negative_likelihood_ratio"],
+ }
+ )
+ return lr.aggregate(["mean", "std"])
+
+
+# %%
+# We first validate the :class:`~sklearn.linear_model.LogisticRegression` model
+# with default hyperparameters as used in the previous section.
+
+from sklearn.model_selection import cross_validate
+
+estimator = LogisticRegression()
+extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10))
+
+# %%
+# We confirm that the model is useful: the post-test odds are between 12 and 20
+# times larger than the pre-test odds.
+#
+# On the contrary, let's consider a dummy model that will output random
+# predictions with similar odds as the average disease prevalence in the
+# training set:
+
+from sklearn.dummy import DummyClassifier
+
+estimator = DummyClassifier(strategy="stratified", random_state=1234)
+extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10))
+
+# %%
+# Here both class likelihood ratios are compatible with 1.0 which makes this
+# classifier useless as a diagnostic tool to improve disease detection.
+#
+# Another option for the dummy model is to always predict the most frequent
+# class, which in this case is "no-disease".
+
+estimator = DummyClassifier(strategy="most_frequent")
+extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10))
+
+# %%
+# The absence of positive predictions means there will be no true positives nor
+# false positives, leading to an undefined `LR+` that by no means should be
+# interpreted as an infinite `LR+` (the classifier perfectly identifying
+# positive cases). In such situation the
+# :func:`~sklearn.metrics.class_likelihood_ratios` function returns `nan` and
+# raises a warning by default. Indeed, the value of `LR-` helps us discard this
+# model.
+#
+# A similar scenario may arise when cross-validating highly imbalanced data with
+# few samples: some folds will have no samples with the disease and therefore
+# they will output no true positives nor false negatives when used for testing.
+# Mathematically this leads to an infinite `LR+`, which should also not be
+# interpreted as the model perfectly identifying positive cases. Such event
+# leads to a higher variance of the estimated likelihood ratios, but can still
+# be interpreted as an increment of the post-test odds of having the condition.
+
+estimator = LogisticRegression()
+X, y = make_classification(n_samples=300, weights=[0.9, 0.1], random_state=0)
+extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10))
+
+# %%
+# Invariance with respect to prevalence
+# =====================================
+#
+# The likelihood ratios are independent of the disease prevalence and can be
+# extrapolated between populations regardless of any possible class imbalance,
+# **as long as the same model is applied to all of them**. Notice that in the
+# plots below **the decision boundary is constant** (see
+# :ref:`sphx_glr_auto_examples_svm_plot_separating_hyperplane_unbalanced.py` for
+# a study of the boundary decision for unbalanced classes).
+#
+# Here we train a :class:`~sklearn.linear_model.LogisticRegression` base model
+# on a case-control study with a prevalence of 50%. It is then evaluated over
+# populations with varying prevalence. We use the
+# :func:`~sklearn.datasets.make_classification` function to ensure the
+# data-generating process is always the same as shown in the plots below. The
+# label `1` corresponds to the positive class "disease", whereas the label `0`
+# stands for "no-disease".
+
+import numpy as np
+import matplotlib.pyplot as plt
+from sklearn.inspection import DecisionBoundaryDisplay
+from collections import defaultdict
+
+populations = defaultdict(list)
+common_params = {
+ "n_samples": 10_000,
+ "n_features": 2,
+ "n_informative": 2,
+ "n_redundant": 0,
+ "random_state": 0,
+}
+weights = np.linspace(0.1, 0.8, 6)
+weights = weights[::-1]
+
+# fit and evaluate base model on balanced classes
+X, y = make_classification(**common_params, weights=[0.5, 0.5])
+estimator = LogisticRegression().fit(X, y)
+lr_base = extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10))
+pos_lr_base, pos_lr_base_std = lr_base["positive"].values
+neg_lr_base, neg_lr_base_std = lr_base["negative"].values
+
+# %%
+# We will now show the decision boundary for each level of prevalence. Note that
+# we only plot a subset of the original data to better assess the linear model
+# decision boundary.
+
+fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(15, 12))
+
+for ax, (n, weight) in zip(axs.ravel(), enumerate(weights)):
+
+ X, y = make_classification(
+ **common_params,
+ weights=[weight, 1 - weight],
+ )
+ prevalence = y.mean()
+ populations["prevalence"].append(prevalence)
+ populations["X"].append(X)
+ populations["y"].append(y)
+
+ # down-sample for plotting
+ rng = np.random.RandomState(1)
+ plot_indices = rng.choice(np.arange(X.shape[0]), size=500, replace=True)
+ X_plot, y_plot = X[plot_indices], y[plot_indices]
+
+ # plot fixed decision boundary of base model with varying prevalence
+ disp = DecisionBoundaryDisplay.from_estimator(
+ estimator,
+ X_plot,
+ response_method="predict",
+ alpha=0.5,
+ ax=ax,
+ )
+ scatter = disp.ax_.scatter(X_plot[:, 0], X_plot[:, 1], c=y_plot, edgecolor="k")
+ disp.ax_.set_title(f"prevalence = {y_plot.mean():.2f}")
+ disp.ax_.legend(*scatter.legend_elements())
+
+# %%
+# We define a function for bootstraping.
+
+
+def scoring_on_bootstrap(estimator, X, y, rng, n_bootstrap=100):
+ results_for_prevalence = defaultdict(list)
+ for _ in range(n_bootstrap):
+ bootstrap_indices = rng.choice(
+ np.arange(X.shape[0]), size=X.shape[0], replace=True
+ )
+ for key, value in scoring(
+ estimator, X[bootstrap_indices], y[bootstrap_indices]
+ ).items():
+ results_for_prevalence[key].append(value)
+ return pd.DataFrame(results_for_prevalence)
+
+
+# %%
+# We score the base model for each prevalence using bootstraping.
+
+results = defaultdict(list)
+n_bootstrap = 100
+rng = np.random.default_rng(seed=0)
+
+for prevalence, X, y in zip(
+ populations["prevalence"], populations["X"], populations["y"]
+):
+
+ results_for_prevalence = scoring_on_bootstrap(
+ estimator, X, y, rng, n_bootstrap=n_bootstrap
+ )
+ results["prevalence"].append(prevalence)
+ results["metrics"].append(
+ results_for_prevalence.aggregate(["mean", "std"]).unstack()
+ )
+
+results = pd.DataFrame(results["metrics"], index=results["prevalence"])
+results.index.name = "prevalence"
+results
+
+# %%
+# In the plots below we observe that the class likelihood ratios re-computed
+# with different prevalences are indeed constant within one standard deviation
+# of those computed with on balanced classes.
+
+fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))
+results["positive_likelihood_ratio"]["mean"].plot(
+ ax=ax1, color="r", label="extrapolation through populations"
+)
+ax1.axhline(y=pos_lr_base + pos_lr_base_std, color="r", linestyle="--")
+ax1.axhline(
+ y=pos_lr_base - pos_lr_base_std,
+ color="r",
+ linestyle="--",
+ label="base model confidence band",
+)
+ax1.fill_between(
+ results.index,
+ results["positive_likelihood_ratio"]["mean"]
+ - results["positive_likelihood_ratio"]["std"],
+ results["positive_likelihood_ratio"]["mean"]
+ + results["positive_likelihood_ratio"]["std"],
+ color="r",
+ alpha=0.3,
+)
+ax1.set(
+ title="Positive likelihood ratio",
+ ylabel="LR+",
+ ylim=[0, 5],
+)
+ax1.legend(loc="lower right")
+
+ax2 = results["negative_likelihood_ratio"]["mean"].plot(
+ ax=ax2, color="b", label="extrapolation through populations"
+)
+ax2.axhline(y=neg_lr_base + neg_lr_base_std, color="b", linestyle="--")
+ax2.axhline(
+ y=neg_lr_base - neg_lr_base_std,
+ color="b",
+ linestyle="--",
+ label="base model confidence band",
+)
+ax2.fill_between(
+ results.index,
+ results["negative_likelihood_ratio"]["mean"]
+ - results["negative_likelihood_ratio"]["std"],
+ results["negative_likelihood_ratio"]["mean"]
+ + results["negative_likelihood_ratio"]["std"],
+ color="b",
+ alpha=0.3,
+)
+ax2.set(
+ title="Negative likelihood ratio",
+ ylabel="LR-",
+ ylim=[0, 0.5],
+)
+ax2.legend(loc="lower right")
+
+plt.show()
diff --git a/sklearn/metrics/__init__.py b/sklearn/metrics/__init__.py
index 0c6f74a8b7f38..7ed6fa318b64a 100644
--- a/sklearn/metrics/__init__.py
+++ b/sklearn/metrics/__init__.py
@@ -19,6 +19,7 @@
from ._classification import accuracy_score
from ._classification import balanced_accuracy_score
+from ._classification import class_likelihood_ratios
from ._classification import classification_report
from ._classification import cohen_kappa_score
from ._classification import confusion_matrix
@@ -108,6 +109,7 @@
"balanced_accuracy_score",
"calinski_harabasz_score",
"check_scoring",
+ "class_likelihood_ratios",
"classification_report",
"cluster",
"cohen_kappa_score",
diff --git a/sklearn/metrics/_classification.py b/sklearn/metrics/_classification.py
index d759f6c4b3e76..a9255f83632b4 100644
--- a/sklearn/metrics/_classification.py
+++ b/sklearn/metrics/_classification.py
@@ -1637,6 +1637,174 @@ def precision_recall_fscore_support(
return precision, recall, f_score, true_sum
+def class_likelihood_ratios(
+ y_true,
+ y_pred,
+ *,
+ labels=None,
+ sample_weight=None,
+ raise_warning=True,
+):
+ """Compute binary classification positive and negative likelihood ratios.
+
+ The positive likelihood ratio is `LR+ = sensitivity / (1 - specificity)`
+ where the sensitivity or recall is the ratio `tp / (tp + fn)` and the
+ specificity is `tn / (tn + fp)`. The negative likelihood ratio is `LR- = (1
+ - sensitivity) / specificity`. Here `tp` is the number of true positives,
+ `fp` the number of false positives, `tn` is the number of true negatives and
+ `fn` the number of false negatives. Both class likelihood ratios can be used
+ to obtain post-test probabilities given a pre-test probability.
+
+ `LR+` ranges from 1 to infinity. A `LR+` of 1 indicates that the probability
+ of predicting the positive class is the same for samples belonging to either
+ class; therefore, the test is useless. The greater `LR+` is, the more a
+ positive prediction is likely to be a true positive when compared with the
+ pre-test probability. A value of `LR+` lower than 1 is invalid as it would
+ indicate that the odds of a sample being a true positive decrease with
+ respect to the pre-test odds.
+
+ `LR-` ranges from 0 to 1. The closer it is to 0, the lower the probability
+ of a given sample to be a false negative. A `LR-` of 1 means the test is
+ useless because the odds of having the condition did not change after the
+ test. A value of `LR-` greater than 1 invalidates the classifier as it
+ indicates an increase in the odds of a sample belonging to the positive
+ class after being classified as negative. This is the case when the
+ classifier systematically predicts the opposite of the true label.
+
+ A typical application in medicine is to identify the positive/negative class
+ to the presence/absence of a disease, respectively; the classifier being a
+ diagnostic test; the pre-test probability of an individual having the
+ disease can be the prevalence of such disease (proportion of a particular
+ population found to be affected by a medical condition); and the post-test
+ probabilities would be the probability that the condition is truly present
+ given a positive test result.
+
+ Read more in the :ref:`User Guide <class_likelihood_ratios>`.
+
+ Parameters
+ ----------
+ y_true : 1d array-like, or label indicator array / sparse matrix
+ Ground truth (correct) target values.
+
+ y_pred : 1d array-like, or label indicator array / sparse matrix
+ Estimated targets as returned by a classifier.
+
+ labels : array-like, default=None
+ List of labels to index the matrix. This may be used to select the
+ positive and negative classes with the ordering `labels=[negative_class,
+ positive_class]`. If `None` is given, those that appear at least once in
+ `y_true` or `y_pred` are used in sorted order.
+
+ sample_weight : array-like of shape (n_samples,), default=None
+ Sample weights.
+
+ raise_warning : bool, default=True
+ Whether or not a case-specific warning message is raised when there is a
+ zero division. Even if the error is not raised, the function will return
+ nan in such cases.
+
+ Returns
+ -------
+ (positive_likelihood_ratio, negative_likelihood_ratio) : tuple
+ A tuple of two float, the first containing the Positive likelihood ratio
+ and the second the Negative likelihood ratio.
+
+ Warns
+ -----
+ When `false positive == 0`, the positive likelihood ratio is undefined.
+ When `true negative == 0`, the negative likelihood ratio is undefined.
+ When `true positive + false negative == 0` both ratios are undefined.
+ In such cases, `UserWarning` will be raised if raise_warning=True.
+
+ References
+ ----------
+ .. [1] `Wikipedia entry for the Likelihood ratios in diagnostic testing
+ <https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from sklearn.metrics import class_likelihood_ratios
+ >>> class_likelihood_ratios([0, 1, 0, 1, 0], [1, 1, 0, 0, 0])
+ (1.5, 0.75)
+ >>> y_true = np.array(["non-cat", "cat", "non-cat", "cat", "non-cat"])
+ >>> y_pred = np.array(["cat", "cat", "non-cat", "non-cat", "non-cat"])
+ >>> class_likelihood_ratios(y_true, y_pred)
+ (1.33..., 0.66...)
+ >>> y_true = np.array(["non-zebra", "zebra", "non-zebra", "zebra", "non-zebra"])
+ >>> y_pred = np.array(["zebra", "zebra", "non-zebra", "non-zebra", "non-zebra"])
+ >>> class_likelihood_ratios(y_true, y_pred)
+ (1.5, 0.75)
+
+ To avoid ambiguities, use the notation `labels=[negative_class,
+ positive_class]`
+
+ >>> y_true = np.array(["non-cat", "cat", "non-cat", "cat", "non-cat"])
+ >>> y_pred = np.array(["cat", "cat", "non-cat", "non-cat", "non-cat"])
+ >>> class_likelihood_ratios(y_true, y_pred, labels=["non-cat", "cat"])
+ (1.5, 0.75)
+ """
+
+ y_type, y_true, y_pred = _check_targets(y_true, y_pred)
+ if y_type != "binary":
+ raise ValueError(
+ "class_likelihood_ratios only supports binary classification "
+ f"problems, got targets of type: {y_type}"
+ )
+
+ cm = confusion_matrix(
+ y_true,
+ y_pred,
+ sample_weight=sample_weight,
+ labels=labels,
+ )
+
+ # Case when `y_test` contains a single class and `y_test == y_pred`.
+ # This may happen when cross-validating imbalanced data and should
+ # not be interpreted as a perfect score.
+ if cm.shape == (1, 1):
+ msg = "samples of only one class were seen during testing "
+ if raise_warning:
+ warnings.warn(msg, UserWarning, stacklevel=2)
+ positive_likelihood_ratio = np.nan
+ negative_likelihood_ratio = np.nan
+ else:
+ tn, fp, fn, tp = cm.ravel()
+ support_pos = tp + fn
+ support_neg = tn + fp
+ pos_num = tp * support_neg
+ pos_denom = fp * support_pos
+ neg_num = fn * support_neg
+ neg_denom = tn * support_pos
+
+ # If zero division warn and set scores to nan, else divide
+ if support_pos == 0:
+ msg = "no samples of the positive class were present in the testing set "
+ if raise_warning:
+ warnings.warn(msg, UserWarning, stacklevel=2)
+ positive_likelihood_ratio = np.nan
+ negative_likelihood_ratio = np.nan
+ if fp == 0:
+ if tp == 0:
+ msg = "no samples predicted for the positive class"
+ else:
+ msg = "positive_likelihood_ratio ill-defined and being set to nan "
+ if raise_warning:
+ warnings.warn(msg, UserWarning, stacklevel=2)
+ positive_likelihood_ratio = np.nan
+ else:
+ positive_likelihood_ratio = pos_num / pos_denom
+ if tn == 0:
+ msg = "negative_likelihood_ratio ill-defined and being set to nan "
+ if raise_warning:
+ warnings.warn(msg, UserWarning, stacklevel=2)
+ negative_likelihood_ratio = np.nan
+ else:
+ negative_likelihood_ratio = neg_num / neg_denom
+
+ return positive_likelihood_ratio, negative_likelihood_ratio
+
+
def precision_score(
y_true,
y_pred,
diff --git a/sklearn/metrics/_scorer.py b/sklearn/metrics/_scorer.py
index e1655af169fcc..e93208f1c67e7 100644
--- a/sklearn/metrics/_scorer.py
+++ b/sklearn/metrics/_scorer.py
@@ -49,6 +49,7 @@
jaccard_score,
mean_absolute_percentage_error,
matthews_corrcoef,
+ class_likelihood_ratios,
)
from .cluster import adjusted_rand_score
@@ -718,6 +719,20 @@ def make_scorer(
balanced_accuracy_scorer = make_scorer(balanced_accuracy_score)
matthews_corrcoef_scorer = make_scorer(matthews_corrcoef)
+
+def positive_likelihood_ratio(y_true, y_pred):
+ return class_likelihood_ratios(y_true, y_pred)[0]
+
+
+def negative_likelihood_ratio(y_true, y_pred):
+ return class_likelihood_ratios(y_true, y_pred)[1]
+
+
+positive_likelihood_ratio_scorer = make_scorer(positive_likelihood_ratio)
+neg_negative_likelihood_ratio_scorer = make_scorer(
+ negative_likelihood_ratio, greater_is_better=False
+)
+
# Score functions that need decision values
top_k_accuracy_scorer = make_scorer(
top_k_accuracy_score, greater_is_better=True, needs_threshold=True
@@ -795,6 +810,8 @@ def __getitem__(self, item):
average_precision=average_precision_scorer,
neg_log_loss=neg_log_loss_scorer,
neg_brier_score=neg_brier_score_scorer,
+ positive_likelihood_ratio=positive_likelihood_ratio_scorer,
+ neg_negative_likelihood_ratio=neg_negative_likelihood_ratio_scorer,
# Cluster metrics that use supervised evaluation
adjusted_rand_score=adjusted_rand_scorer,
rand_score=rand_scorer,
| diff --git a/sklearn/metrics/tests/test_classification.py b/sklearn/metrics/tests/test_classification.py
index 25c2dcda55d9c..02692806016f1 100644
--- a/sklearn/metrics/tests/test_classification.py
+++ b/sklearn/metrics/tests/test_classification.py
@@ -26,6 +26,7 @@
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import balanced_accuracy_score
+from sklearn.metrics import class_likelihood_ratios
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
@@ -600,6 +601,104 @@ def test_confusion_matrix_normalize_single_class():
confusion_matrix(y_pred, y_test, normalize="true")
+@pytest.mark.parametrize(
+ "params, warn_msg",
+ [
+ # When y_test contains one class only and y_test==y_pred, LR+ is undefined
+ (
+ {
+ "y_true": np.array([0, 0, 0, 0, 0, 0]),
+ "y_pred": np.array([0, 0, 0, 0, 0, 0]),
+ },
+ "samples of only one class were seen during testing",
+ ),
+ # When `fp == 0` and `tp != 0`, LR+ is undefined
+ (
+ {
+ "y_true": np.array([1, 1, 1, 0, 0, 0]),
+ "y_pred": np.array([1, 1, 1, 0, 0, 0]),
+ },
+ "positive_likelihood_ratio ill-defined and being set to nan",
+ ),
+ # When `fp == 0` and `tp == 0`, LR+ is undefined
+ (
+ {
+ "y_true": np.array([1, 1, 1, 0, 0, 0]),
+ "y_pred": np.array([0, 0, 0, 0, 0, 0]),
+ },
+ "no samples predicted for the positive class",
+ ),
+ # When `tn == 0`, LR- is undefined
+ (
+ {
+ "y_true": np.array([1, 1, 1, 0, 0, 0]),
+ "y_pred": np.array([0, 0, 0, 1, 1, 1]),
+ },
+ "negative_likelihood_ratio ill-defined and being set to nan",
+ ),
+ # When `tp + fn == 0` both ratios are undefined
+ (
+ {
+ "y_true": np.array([0, 0, 0, 0, 0, 0]),
+ "y_pred": np.array([1, 1, 1, 0, 0, 0]),
+ },
+ "no samples of the positive class were present in the testing set",
+ ),
+ ],
+)
+def test_likelihood_ratios_warnings(params, warn_msg):
+ # likelihood_ratios must raise warnings when at
+ # least one of the ratios is ill-defined.
+
+ with pytest.warns(UserWarning, match=warn_msg):
+ class_likelihood_ratios(**params)
+
+
+@pytest.mark.parametrize(
+ "params, err_msg",
+ [
+ (
+ {
+ "y_true": np.array([0, 1, 0, 1, 0]),
+ "y_pred": np.array([1, 1, 0, 0, 2]),
+ },
+ "class_likelihood_ratios only supports binary classification "
+ "problems, got targets of type: multiclass",
+ ),
+ ],
+)
+def test_likelihood_ratios_errors(params, err_msg):
+ # likelihood_ratios must raise error when attempting
+ # non-binary classes to avoid Simpson's paradox
+ with pytest.raises(ValueError, match=err_msg):
+ class_likelihood_ratios(**params)
+
+
+def test_likelihood_ratios():
+ # Build confusion matrix with tn=9, fp=8, fn=1, tp=2,
+ # sensitivity=2/3, specificity=9/17, prevalence=3/20,
+ # LR+=34/24, LR-=17/27
+ y_true = np.array([1] * 3 + [0] * 17)
+ y_pred = np.array([1] * 2 + [0] * 10 + [1] * 8)
+
+ pos, neg = class_likelihood_ratios(y_true, y_pred)
+ assert_allclose(pos, 34 / 24)
+ assert_allclose(neg, 17 / 27)
+
+ # Build limit case with y_pred = y_true
+ pos, neg = class_likelihood_ratios(y_true, y_true)
+ assert_array_equal(pos, np.nan * 2)
+ assert_allclose(neg, np.zeros(2), rtol=1e-12)
+
+ # Ignore last 5 samples to get tn=9, fp=3, fn=1, tp=2,
+ # sensitivity=2/3, specificity=9/12, prevalence=3/20,
+ # LR+=24/9, LR-=12/27
+ sample_weight = np.array([1.0] * 15 + [0.0] * 5)
+ pos, neg = class_likelihood_ratios(y_true, y_pred, sample_weight=sample_weight)
+ assert_allclose(pos, 24 / 9)
+ assert_allclose(neg, 12 / 27)
+
+
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
diff --git a/sklearn/metrics/tests/test_score_objects.py b/sklearn/metrics/tests/test_score_objects.py
index 23680e48ae3e7..204a895742db7 100644
--- a/sklearn/metrics/tests/test_score_objects.py
+++ b/sklearn/metrics/tests/test_score_objects.py
@@ -104,6 +104,8 @@
"roc_auc_ovr_weighted",
"roc_auc_ovo_weighted",
"matthews_corrcoef",
+ "positive_likelihood_ratio",
+ "neg_negative_likelihood_ratio",
]
# All supervised cluster scorers (They behave like classification metric)
| diff --git a/doc/modules/classes.rst b/doc/modules/classes.rst
index bc58d50ce8f81..c6838556d50ad 100644
--- a/doc/modules/classes.rst
+++ b/doc/modules/classes.rst
@@ -954,6 +954,7 @@ details.
metrics.average_precision_score
metrics.balanced_accuracy_score
metrics.brier_score_loss
+ metrics.class_likelihood_ratios
metrics.classification_report
metrics.cohen_kappa_score
metrics.confusion_matrix
diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index d8fe7d87eec7a..34412576f80aa 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -311,6 +311,7 @@ Some of these are restricted to the binary classification case:
precision_recall_curve
roc_curve
+ class_likelihood_ratios
det_curve
@@ -876,6 +877,8 @@ In this context, we can define the notions of precision, recall and F-measure:
F_\beta = (1 + \beta^2) \frac{\text{precision} \times \text{recall}}{\beta^2 \text{precision} + \text{recall}}.
+Sometimes recall is also called ''sensitivity''.
+
Here are some small examples in binary classification::
>>> from sklearn import metrics
@@ -1756,6 +1759,133 @@ the same does a lower Brier score loss always mean better calibration"
and probability estimation." <https://drops.dagstuhl.de/opus/volltexte/2008/1382/>`_
Dagstuhl Seminar Proceedings. Schloss Dagstuhl-Leibniz-Zentrum fr Informatik (2008).
+.. _class_likelihood_ratios:
+
+Class likelihood ratios
+-----------------------
+
+The :func:`class_likelihood_ratios` function computes the `positive and negative
+likelihood ratios
+<https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_
+:math:`LR_\pm` for binary classes, which can be interpreted as the ratio of
+post-test to pre-test odds as explained below. As a consequence, this metric is
+invariant w.r.t. the class prevalence (the number of samples in the positive
+class divided by the total number of samples) and **can be extrapolated between
+populations regardless of any possible class imbalance.**
+
+The :math:`LR_\pm` metrics are therefore very useful in settings where the data
+available to learn and evaluate a classifier is a study population with nearly
+balanced classes, such as a case-control study, while the target application,
+i.e. the general population, has very low prevalence.
+
+The positive likelihood ratio :math:`LR_+` is the probability of a classifier to
+correctly predict that a sample belongs to the positive class divided by the
+probability of predicting the positive class for a sample belonging to the
+negative class:
+
+.. math::
+
+ LR_+ = \frac{\text{PR}(P+|T+)}{\text{PR}(P+|T-)}.
+
+The notation here refers to predicted (:math:`P`) or true (:math:`T`) label and
+the sign :math:`+` and :math:`-` refer to the positive and negative class,
+respectively, e.g. :math:`P+` stands for "predicted positive".
+
+Analogously, the negative likelihood ratio :math:`LR_-` is the probability of a
+sample of the positive class being classified as belonging to the negative class
+divided by the probability of a sample of the negative class being correctly
+classified:
+
+.. math::
+
+ LR_- = \frac{\text{PR}(P-|T+)}{\text{PR}(P-|T-)}.
+
+For classifiers above chance :math:`LR_+` above 1 **higher is better**, while
+:math:`LR_-` ranges from 0 to 1 and **lower is better**.
+Values of :math:`LR_\pm\approx 1` correspond to chance level.
+
+Notice that probabilities differ from counts, for instance
+:math:`\operatorname{PR}(P+|T+)` is not equal to the number of true positive
+counts ``tp`` (see `the wikipedia page
+<https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_ for
+the actual formulas).
+
+**Interpretation across varying prevalence:**
+
+Both class likelihood ratios are interpretable in terms of an odds ratio
+(pre-test and post-tests):
+
+.. math::
+
+ \text{post-test odds} = \text{Likelihood ratio} \times \text{pre-test odds}.
+
+Odds are in general related to probabilities via
+
+.. math::
+
+ \text{odds} = \frac{\text{probability}}{1 - \text{probability}},
+
+or equivalently
+
+.. math::
+
+ \text{probability} = \frac{\text{odds}}{1 + \text{odds}}.
+
+On a given population, the pre-test probability is given by the prevalence. By
+converting odds to probabilities, the likelihood ratios can be translated into a
+probability of truly belonging to either class before and after a classifier
+prediction:
+
+.. math::
+
+ \text{post-test odds} = \text{Likelihood ratio} \times
+ \frac{\text{pre-test probability}}{1 - \text{pre-test probability}},
+
+.. math::
+
+ \text{post-test probability} = \frac{\text{post-test odds}}{1 + \text{post-test odds}}.
+
+**Mathematical divergences:**
+
+The positive likelihood ratio is undefined when :math:`fp = 0`, which can be
+interpreted as the classifier perfectly identifying positive cases. If :math:`fp
+= 0` and additionally :math:`tp = 0`, this leads to a zero/zero division. This
+happens, for instance, when using a `DummyClassifier` that always predicts the
+negative class and therefore the interpretation as a perfect classifier is lost.
+
+The negative likelihood ratio is undefined when :math:`tn = 0`. Such divergence
+is invalid, as :math:`LR_- > 1` would indicate an increase in the odds of a
+sample belonging to the positive class after being classified as negative, as if
+the act of classifying caused the positive condition. This includes the case of
+a `DummyClassifier` that always predicts the positive class (i.e. when
+:math:`tn=fn=0`).
+
+Both class likelihood ratios are undefined when :math:`tp=fn=0`, which means
+that no samples of the positive class were present in the testing set. This can
+also happen when cross-validating highly imbalanced data.
+
+In all the previous cases the :func:`class_likelihood_ratios` function raises by
+default an appropriate warning message and returns `nan` to avoid pollution when
+averaging over cross-validation folds.
+
+For a worked-out demonstration of the :func:`class_likelihood_ratios` function,
+see the example below.
+
+.. topic:: Examples:
+
+ * :ref:`sphx_glr_auto_examples_model_selection_plot_likelihood_ratios.py`
+
+.. topic:: References:
+
+ * `Wikipedia entry for Likelihood ratios in diagnostic testing
+ <https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_
+
+ * Brenner, H., & Gefeller, O. (1997).
+ Variation of sensitivity, specificity, likelihood ratios and predictive
+ values with disease prevalence.
+ Statistics in medicine, 16(9), 981-991.
+
+
.. _multilabel_ranking_metrics:
Multilabel ranking metrics
diff --git a/doc/whats_new/v1.2.rst b/doc/whats_new/v1.2.rst
index 561eaa17bca6f..63f9f4e2ead75 100644
--- a/doc/whats_new/v1.2.rst
+++ b/doc/whats_new/v1.2.rst
@@ -71,6 +71,14 @@ Changelog
now conducts validation for `max_features` and `feature_names_in` parameters.
:pr:`23299` by :user:`Long Bao <lorentzbao>`.
+:mod:`sklearn.metrics`
+......................
+
+- |Feature| :func:`class_likelihood_ratios` is added to compute the positive and
+ negative likelihood ratios derived from the confusion matrix
+ of a binary classification problem. :pr:`22518` by
+ :user:`Arturo Amor <ArturoAmorQ>`.
+
:mod:`sklearn.neighbors`
........................
| [
{
"components": [
{
"doc": "",
"lines": [
81,
84
],
"name": "scoring",
"signature": "def scoring(estimator, X, y):",
"type": "function"
},
{
"doc": "",
"lines": [
87,
94
],
"... | [
"sklearn/metrics/tests/test_classification.py::test_classification_report_dictionary_output",
"sklearn/metrics/tests/test_classification.py::test_classification_report_output_dict_empty_input",
"sklearn/metrics/tests/test_classification.py::test_classification_report_zero_division_warning[warn]",
"sklearn/met... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
FEA Add positive and negative likelihood ratios to metrics
<!--
Thanks for contributing a pull request! Please ensure you have taken a look at
the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md
-->
#### Reference Issues/PRs
<!--
Example: Fixes #1234. See also #3456.
Please use keywords (e.g., Fixes) to create link to the issues or pull requests
you resolved, so that they will automatically be closed when your pull request
is merged. See https://github.com/blog/1506-closing-issues-via-pull-requests
-->
Fixes #22404.
#### What does this implement/fix? Explain your changes.
We agreed that we should add positive and negative likelihood ratios as they are considered as standard good practice in medicine / public health, as well as interpretable in terms of the pre-test versus post-test odds ratio even in class imbalance.
#### Any other comments?
<!--
Please be aware that we are a loose team of volunteers so patience is
necessary; assistance handling other issues is very welcome. We value
all user contributions, no matter how minor they are. If we are slow to
review, either the pull request needs some benchmarking, tinkering,
convincing, etc. or more likely the reviewers are simply busy. In either
case, we ask for your understanding during the review process.
For more information, see our FAQ on this topic:
http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention.
Thanks for contributing!
-->
This is a first PR that creates a broad function that computes both positive and negative likelihood ratios. A second step of creating functions that specifically select one or the other ("positive_likelihood_ratio" and a "negative_likelihood_ratio") will be addressed in another PR once that we agreed on forms and variable names for this one.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in examples/model_selection/plot_likelihood_ratios.py]
(definition of scoring:)
def scoring(estimator, X, y):
(definition of extract_score:)
def extract_score(cv_results):
(definition of scoring_on_bootstrap:)
def scoring_on_bootstrap(estimator, X, y, rng, n_bootstrap=100):
[end of new definitions in examples/model_selection/plot_likelihood_ratios.py]
[start of new definitions in sklearn/metrics/_classification.py]
(definition of class_likelihood_ratios:)
def class_likelihood_ratios( y_true, y_pred, *, labels=None, sample_weight=None, raise_warning=True, ):
"""Compute binary classification positive and negative likelihood ratios.
The positive likelihood ratio is `LR+ = sensitivity / (1 - specificity)`
where the sensitivity or recall is the ratio `tp / (tp + fn)` and the
specificity is `tn / (tn + fp)`. The negative likelihood ratio is `LR- = (1
- sensitivity) / specificity`. Here `tp` is the number of true positives,
`fp` the number of false positives, `tn` is the number of true negatives and
`fn` the number of false negatives. Both class likelihood ratios can be used
to obtain post-test probabilities given a pre-test probability.
`LR+` ranges from 1 to infinity. A `LR+` of 1 indicates that the probability
of predicting the positive class is the same for samples belonging to either
class; therefore, the test is useless. The greater `LR+` is, the more a
positive prediction is likely to be a true positive when compared with the
pre-test probability. A value of `LR+` lower than 1 is invalid as it would
indicate that the odds of a sample being a true positive decrease with
respect to the pre-test odds.
`LR-` ranges from 0 to 1. The closer it is to 0, the lower the probability
of a given sample to be a false negative. A `LR-` of 1 means the test is
useless because the odds of having the condition did not change after the
test. A value of `LR-` greater than 1 invalidates the classifier as it
indicates an increase in the odds of a sample belonging to the positive
class after being classified as negative. This is the case when the
classifier systematically predicts the opposite of the true label.
A typical application in medicine is to identify the positive/negative class
to the presence/absence of a disease, respectively; the classifier being a
diagnostic test; the pre-test probability of an individual having the
disease can be the prevalence of such disease (proportion of a particular
population found to be affected by a medical condition); and the post-test
probabilities would be the probability that the condition is truly present
given a positive test result.
Read more in the :ref:`User Guide <class_likelihood_ratios>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array-like, default=None
List of labels to index the matrix. This may be used to select the
positive and negative classes with the ordering `labels=[negative_class,
positive_class]`. If `None` is given, those that appear at least once in
`y_true` or `y_pred` are used in sorted order.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
raise_warning : bool, default=True
Whether or not a case-specific warning message is raised when there is a
zero division. Even if the error is not raised, the function will return
nan in such cases.
Returns
-------
(positive_likelihood_ratio, negative_likelihood_ratio) : tuple
A tuple of two float, the first containing the Positive likelihood ratio
and the second the Negative likelihood ratio.
Warns
-----
When `false positive == 0`, the positive likelihood ratio is undefined.
When `true negative == 0`, the negative likelihood ratio is undefined.
When `true positive + false negative == 0` both ratios are undefined.
In such cases, `UserWarning` will be raised if raise_warning=True.
References
----------
.. [1] `Wikipedia entry for the Likelihood ratios in diagnostic testing
<https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing>`_.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import class_likelihood_ratios
>>> class_likelihood_ratios([0, 1, 0, 1, 0], [1, 1, 0, 0, 0])
(1.5, 0.75)
>>> y_true = np.array(["non-cat", "cat", "non-cat", "cat", "non-cat"])
>>> y_pred = np.array(["cat", "cat", "non-cat", "non-cat", "non-cat"])
>>> class_likelihood_ratios(y_true, y_pred)
(1.33..., 0.66...)
>>> y_true = np.array(["non-zebra", "zebra", "non-zebra", "zebra", "non-zebra"])
>>> y_pred = np.array(["zebra", "zebra", "non-zebra", "non-zebra", "non-zebra"])
>>> class_likelihood_ratios(y_true, y_pred)
(1.5, 0.75)
To avoid ambiguities, use the notation `labels=[negative_class,
positive_class]`
>>> y_true = np.array(["non-cat", "cat", "non-cat", "cat", "non-cat"])
>>> y_pred = np.array(["cat", "cat", "non-cat", "non-cat", "non-cat"])
>>> class_likelihood_ratios(y_true, y_pred, labels=["non-cat", "cat"])
(1.5, 0.75)"""
[end of new definitions in sklearn/metrics/_classification.py]
[start of new definitions in sklearn/metrics/_scorer.py]
(definition of positive_likelihood_ratio:)
def positive_likelihood_ratio(y_true, y_pred):
(definition of negative_likelihood_ratio:)
def negative_likelihood_ratio(y_true, y_pred):
[end of new definitions in sklearn/metrics/_scorer.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add Positive Likelihood Ratio (and negative) to metrics
### Describe the workflow you want to enable
For severely imbalanced binary classification settings, the Positive Likelihood Ratio (or negative) are classic performance metrics of a test used for instance in medicine / public health:
https://en.wikipedia.org/wiki/Likelihood_ratios_in_diagnostic_testing#positive_likelihood_ratio
They come with the benefit of being interpretable in terms of the pre-test versus post-test odds ratio (as mentioned on wikipedia): "The [pretest odds](https://en.wikipedia.org/wiki/Pre-_and_post-test_probability#Pre-test_probability) of a particular diagnosis, multiplied by the likelihood ratio, determines the [post-test odds](https://en.wikipedia.org/wiki/Pre-_and_post-test_probability). This calculation is based on [Bayes' theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem). (Note that odds can be calculated from, and then converted to, [probability](https://en.wikipedia.org/wiki/Probability).)"
As they are both considered as standard good practice in a field, and well interpretable even in class imbalance, I think that we should add them.
### Describe your proposed solution
Add to metrics an entry "positive_likelihood_ratio" and a "negative_likelihood_ratio" which implement these two metrics.
### Describe alternatives you've considered, if relevant
The F1 score or the balanced accuracy (or the AUC ROC, or average precision) are atlernatives. But they cannot be easily interpreted in terms of probabilities of true positive given a detection. The latter is the quantity of interest in some settings.
### Additional context
@ArturoAmorQ is volunteer to pick this up.
----------
This seems to be just any of the zillion measures derived from the confusion matrix, see https://en.wikipedia.org/wiki/Confusion_matrix for more of them. Do we really need more of these?
I would rather favour to see scikit-learn advertise the use of strictly proper scoring rules like the LogLoss or the Brier score.
These are two very different question: a strictly proper scoring rule can be useful for model selection, but a metric such as the positive likelihood ratio will be useful to conclude on whether a model gives a useful prediction for low prevalence.
Out of all the metrics built from the confusion matrix, I suggested this specific one because of its interpretation in terms of pre-test versus post-test log odds. It is a particularly useful one, and should be used more to report model evaluations (it would be hard to conclude on usefulness of models if its model evaluations are reported in terms of log-loss).
> I would rather favour to see scikit-learn advertise the use of strictly proper scoring rules like the LogLoss or the Brier score.
Also both of those metrics are independent of the value of the cut-off for hard-predictions at 0.5. A user might be interested in using a different value for that cut-off based on a metric that has more business/scientific meaning than the log loss of the conditional probabilistic model.
Also log loss / Brier score are not defined for non-probabilistic models such as SVMs with the hinge loss for instance.
--------------------
</issues> | 935f7e66068fc130971646225a95ade649f57928 |
conan-io__conan-10594 | 10,594 | conan-io/conan | null | 606ff7ca246a44cec3fdb5b1abe9863dbf0298d3 | 2022-02-16T12:42:46Z | diff --git a/conan/tools/files/files.py b/conan/tools/files/files.py
index a3375b3d7db..07b1a1e9333 100644
--- a/conan/tools/files/files.py
+++ b/conan/tools/files/files.py
@@ -4,6 +4,7 @@
import hashlib
import os
import platform
+import shutil
import subprocess
import sys
from contextlib import contextmanager
@@ -14,6 +15,7 @@
from conan.tools import CONAN_TOOLCHAIN_ARGS_FILE, CONAN_TOOLCHAIN_ARGS_SECTION
from conans.client.downloaders.download import run_downloader
from conans.errors import ConanException
+from conans.util.files import rmdir
if six.PY3: # Remove this IF in develop2
from shutil import which
@@ -491,3 +493,21 @@ def collect_libs(conanfile, folder=None):
result.append(name)
result.sort()
return result
+
+
+# TODO: Do NOT document this yet. It is unclear the interface, maybe should be split
+def swap_child_folder(parent_folder, child_folder):
+ """ replaces the current folder contents with the contents of one child folder. This
+ is used in the SCM monorepo flow, when it is necessary to use one subproject subfolder
+ to replace the whole cloned git repo
+ """
+ for f in os.listdir(parent_folder):
+ if f != child_folder:
+ path = os.path.join(parent_folder, f)
+ if os.path.isfile(path):
+ os.remove(path)
+ else:
+ rmdir(path)
+ child = os.path.join(parent_folder, child_folder)
+ for f in os.listdir(child):
+ shutil.move(os.path.join(child, f), os.path.join(parent_folder, f))
diff --git a/conan/tools/scm/__init__.py b/conan/tools/scm/__init__.py
new file mode 100644
index 00000000000..f98660aafc2
--- /dev/null
+++ b/conan/tools/scm/__init__.py
@@ -0,0 +1,1 @@
+from conan.tools.scm.git import Git
diff --git a/conan/tools/scm/git.py b/conan/tools/scm/git.py
new file mode 100644
index 00000000000..2e6b24e0f44
--- /dev/null
+++ b/conan/tools/scm/git.py
@@ -0,0 +1,85 @@
+import os
+
+from conan.tools.files import chdir
+from conans.errors import ConanException
+from conans.util.files import mkdir
+from conans.util.runners import check_output_runner
+
+
+class Git(object):
+ def __init__(self, conanfile, folder="."):
+ self._conanfile = conanfile
+ self.folder = folder
+
+ def _run(self, cmd):
+ with chdir(self._conanfile, self.folder):
+ return check_output_runner("git {}".format(cmd)).strip()
+
+ def get_commit(self):
+ try:
+ # commit = self._run("rev-parse HEAD") For the whole repo
+ # This rev-list knows to capture the last commit for the folder
+ commit = self._run('rev-list HEAD -n 1 -- "{}"'.format(self.folder))
+ return commit
+ except Exception as e:
+ raise ConanException("Unable to get git commit in '%s': %s" % (self.folder, str(e)))
+
+ def get_remote_url(self, remote="origin"):
+ remotes = self._run("remote -v")
+ for r in remotes.splitlines():
+ name, url = r.split(maxsplit=1)
+ if name == remote:
+ url, _ = url.rsplit(None, 1)
+ if os.path.exists(url): # Windows local directory
+ url = url.replace("\\", "/")
+ return url
+
+ def commit_in_remote(self, commit, remote="origin"):
+ if not remote:
+ return False
+ try:
+ branches = self._run("branch -r --contains {}".format(commit))
+ return "{}/".format(remote) in branches
+ except Exception as e:
+ raise ConanException("Unable to check remote commit in '%s': %s" % (self.folder, str(e)))
+
+ def is_dirty(self):
+ status = self._run("status -s").strip()
+ return bool(status)
+
+ def get_url_and_commit(self, remote="origin"):
+ dirty = self.is_dirty()
+ if dirty:
+ raise ConanException("Repo is dirty, cannot capture url and commit: "
+ "{}".format(self.folder))
+ commit = self.get_commit()
+ url = self.get_remote_url(remote=remote)
+ in_remote = self.commit_in_remote(commit, remote=remote)
+ if in_remote:
+ return url, commit
+ # TODO: Once we know how to pass [conf] to export, enable this
+ # conf_name = "tools.scm:local"
+ # allow_local = self._conanfile.conf[conf_name]
+ # if not allow_local:
+ # raise ConanException("Current commit {} doesn't exist in remote {}\n"
+ # "use '-c {}=1' to allow it".format(commit, remote, conf_name))
+
+ self._conanfile.output.warn("Current commit {} doesn't exist in remote {}\n"
+ "This revision will not be buildable in other "
+ "computer".format(commit, remote))
+ return self.get_repo_root(), commit
+
+ def get_repo_root(self):
+ folder = self._run("rev-parse --show-toplevel")
+ return folder.replace("\\", "/")
+
+ def clone(self, url, target=""):
+ if os.path.exists(url):
+ url = url.replace("\\", "/") # Windows local directory
+ mkdir(self.folder)
+ self._conanfile.output.info("Cloning git repo")
+ self._run('clone "{}" {}'.format(url, target))
+
+ def checkout(self, commit):
+ self._conanfile.output.info("Checkout: {}".format(commit))
+ self._run('checkout {}'.format(commit))
| diff --git a/conans/test/functional/tools/scm/__init__.py b/conans/test/functional/tools/scm/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/conans/test/functional/tools/scm/test_git.py b/conans/test/functional/tools/scm/test_git.py
new file mode 100644
index 00000000000..2ba29783f57
--- /dev/null
+++ b/conans/test/functional/tools/scm/test_git.py
@@ -0,0 +1,458 @@
+import os
+import re
+import textwrap
+
+import pytest
+import six
+
+from conans.test.utils.scm import create_local_git_repo, git_add_changes_commit, git_create_bare_repo
+from conans.test.utils.test_files import temp_folder
+from conans.test.utils.tools import TestClient
+from conans.util.files import rmdir, save_files
+
+
+@pytest.mark.skipif(six.PY2, reason="Only Py3")
+class TestGitBasicCapture:
+ """ base Git capture operations. They do not raise (unless errors)
+ """
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.scm import Git
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def export(self):
+ git = Git(self, self.recipe_folder)
+ commit = git.get_commit()
+ url = git.get_remote_url()
+ self.output.info("URL: {}".format(url))
+ self.output.info("COMMIT: {}".format(commit))
+ in_remote = git.commit_in_remote(commit)
+ self.output.info("COMMIT IN REMOTE: {}".format(in_remote))
+ self.output.info("DIRTY: {}".format(git.is_dirty()))
+ """)
+
+ def test_capture_commit_local(self):
+ """
+ A local repo, without remote, will have commit, but no URL
+ """
+ c = TestClient()
+ c.save({"conanfile.py": self.conanfile})
+ commit = c.init_git_repo()
+ c.run("export .")
+ assert "pkg/0.1: COMMIT: {}".format(commit) in c.out
+ assert "pkg/0.1: URL: None" in c.out
+ assert "pkg/0.1: COMMIT IN REMOTE: False" in c.out
+ assert "pkg/0.1: DIRTY: False" in c.out
+
+ def test_capture_remote_url(self):
+ """
+ a cloned repo, will have a default "origin" remote and will manage to get URL
+ """
+ folder = temp_folder()
+ url, commit = create_local_git_repo(files={"conanfile.py": self.conanfile}, folder=folder)
+
+ c = TestClient()
+ c.run_command('git clone "{}" myclone'.format(folder))
+ with c.chdir("myclone"):
+ c.run("export .")
+ assert "pkg/0.1: COMMIT: {}".format(commit) in c.out
+ assert "pkg/0.1: URL: {}".format(url) in c.out
+ assert "pkg/0.1: COMMIT IN REMOTE: True" in c.out
+ assert "pkg/0.1: DIRTY: False" in c.out
+
+ def test_capture_remote_pushed_commit(self):
+ """
+ a cloned repo, after doing some new commit, no longer commit in remote, until push
+ """
+ url = git_create_bare_repo()
+
+ c = TestClient()
+ c.run_command('git clone "{}" myclone'.format(url))
+ with c.chdir("myclone"):
+ c.save({"conanfile.py": self.conanfile + "\n# some coment!"})
+ new_commit = git_add_changes_commit(c.current_folder)
+
+ c.run("export .")
+ assert "pkg/0.1: COMMIT: {}".format(new_commit) in c.out
+ assert "pkg/0.1: URL: {}".format(url) in c.out
+ assert "pkg/0.1: COMMIT IN REMOTE: False" in c.out
+ assert "pkg/0.1: DIRTY: False" in c.out
+ c.run_command("git push")
+ c.run("export .")
+ assert "pkg/0.1: COMMIT: {}".format(new_commit) in c.out
+ assert "pkg/0.1: URL: {}".format(url) in c.out
+ assert "pkg/0.1: COMMIT IN REMOTE: True" in c.out
+ assert "pkg/0.1: DIRTY: False" in c.out
+
+
+@pytest.mark.skipif(six.PY2, reason="Only Py3")
+class TestGitCaptureSCM:
+ """ test the get_url_and_commit() high level method intended for SCM capturing
+ into conandata.yaml
+ """
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.scm import Git
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def export(self):
+ git = Git(self, self.recipe_folder)
+ scm_url, scm_commit = git.get_url_and_commit()
+ self.output.info("SCM URL: {}".format(scm_url))
+ self.output.info("SCM COMMIT: {}".format(scm_commit))
+ """)
+
+ def test_capture_commit_local(self):
+ """
+ A local repo, without remote, will provide its own URL to the export(),
+ and if it has local changes, it will be marked as dirty, and raise an error
+ """
+ c = TestClient()
+ c.save({"conanfile.py": self.conanfile})
+ commit = c.init_git_repo()
+ c.run("export .")
+ assert "This revision will not be buildable in other computer" in c.out
+ assert "pkg/0.1: SCM COMMIT: {}".format(commit) in c.out
+ assert "pkg/0.1: SCM URL: {}".format(c.current_folder.replace("\\", "/")) in c.out
+
+ c.save({"conanfile.py": self.conanfile + "\n# something...."})
+ c.run("export .", assert_error=True)
+ assert "Repo is dirty, cannot capture url and commit" in c.out
+
+ def test_capture_remote_url(self):
+ """
+ a cloned repo that is expored, will report the URL of the remote
+ """
+ folder = temp_folder()
+ url, commit = create_local_git_repo(files={"conanfile.py": self.conanfile}, folder=folder)
+
+ c = TestClient()
+ c.run_command('git clone "{}" myclone'.format(folder))
+ with c.chdir("myclone"):
+ c.run("export .")
+ assert "pkg/0.1: SCM COMMIT: {}".format(commit) in c.out
+ assert "pkg/0.1: SCM URL: {}".format(url) in c.out
+
+ def test_capture_remote_pushed_commit(self):
+ """
+ a cloned repo, after doing some new commit, no longer commit in remote, until push
+ """
+ url = git_create_bare_repo()
+
+ c = TestClient()
+ c.run_command('git clone "{}" myclone'.format(url))
+ with c.chdir("myclone"):
+ c.save({"conanfile.py": self.conanfile + "\n# some coment!"})
+ new_commit = git_add_changes_commit(c.current_folder)
+
+ c.run("export .")
+ assert "This revision will not be buildable in other computer" in c.out
+ assert "pkg/0.1: SCM COMMIT: {}".format(new_commit) in c.out
+ # NOTE: commit not pushed yet, so locally is the current folder
+ assert "pkg/0.1: SCM URL: {}".format(c.current_folder.replace("\\", "/")) in c.out
+ c.run_command("git push")
+ c.run("export .")
+ assert "pkg/0.1: SCM COMMIT: {}".format(new_commit) in c.out
+ assert "pkg/0.1: SCM URL: {}".format(url) in c.out
+
+
+@pytest.mark.skipif(six.PY2, reason="Only Py3")
+class TestGitBasicClone:
+ """ base Git cloning operations
+ """
+ conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.scm import Git
+ from conan.tools.files import load
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def layout(self):
+ self.folders.source = "source"
+
+ def source(self):
+ git = Git(self)
+ git.clone(url="{url}", target=".")
+ git.checkout(commit="{commit}")
+ self.output.info("MYCMAKE: {{}}".format(load(self, "CMakeLists.txt")))
+ self.output.info("MYFILE: {{}}".format(load(self, "src/myfile.h")))
+ """)
+
+ def test_clone_checkout(self):
+ folder = os.path.join(temp_folder(), "myrepo")
+ url, commit = create_local_git_repo(files={"src/myfile.h": "myheader!",
+ "CMakeLists.txt": "mycmake"}, folder=folder)
+ # This second commit will NOT be used, as I will use the above commit in the conanfile
+ save_files(path=folder, files={"src/myfile.h": "my2header2!"})
+ git_add_changes_commit(folder=folder)
+
+ c = TestClient()
+ c.save({"conanfile.py": self.conanfile.format(url=url, commit=commit)})
+ c.run("create .")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c.out
+ assert "pkg/0.1: MYFILE: myheader!" in c.out
+
+ # It also works in local flow
+ c.run("source .")
+ assert "conanfile.py (pkg/0.1): MYCMAKE: mycmake" in c.out
+ assert "conanfile.py (pkg/0.1): MYFILE: myheader!" in c.out
+ assert c.load("source/src/myfile.h") == "myheader!"
+ assert c.load("source/CMakeLists.txt") == "mycmake"
+
+
+@pytest.mark.skipif(six.PY2, reason="Only Py3")
+class TestGitBasicSCMFlow:
+ """ Build the full new SCM approach:
+ - export() captures the URL and commit with get_url_and_commit(
+ - export() stores it in conandata.yml
+ - source() recovers the info from conandata.yml and clones it
+ """
+ conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.scm import Git
+ from conan.tools.files import load, update_conandata
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def export(self):
+ git = Git(self, self.recipe_folder)
+ scm_url, scm_commit = git.get_url_and_commit()
+ update_conandata(self, {"sources": {"commit": scm_commit, "url": scm_url}})
+
+ def layout(self):
+ self.folders.source = "."
+
+ def source(self):
+ git = Git(self)
+ sources = self.conan_data["sources"]
+ git.clone(url=sources["url"], target=".")
+ git.checkout(commit=sources["commit"])
+ self.output.info("MYCMAKE: {}".format(load(self, "CMakeLists.txt")))
+ self.output.info("MYFILE: {}".format(load(self, "src/myfile.h")))
+
+ def build(self):
+ cmake = os.path.join(self.source_folder, "CMakeLists.txt")
+ file_h = os.path.join(self.source_folder, "src/myfile.h")
+ self.output.info("MYCMAKE-BUILD: {}".format(load(self, cmake)))
+ self.output.info("MYFILE-BUILD: {}".format(load(self, file_h)))
+ """)
+
+ def test_full_scm(self):
+ folder = os.path.join(temp_folder(), "myrepo")
+ url, commit = create_local_git_repo(files={"conanfile.py": self.conanfile,
+ "src/myfile.h": "myheader!",
+ "CMakeLists.txt": "mycmake"}, folder=folder)
+
+ c = TestClient(default_server_user=True)
+ c.run_command('git clone "{}" .'.format(url))
+ c.run("create .")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c.out
+ assert "pkg/0.1: MYFILE: myheader!" in c.out
+ c.run("upload * --all -c")
+
+ # Do a change and commit, this commit will not be used by package
+ save_files(path=folder, files={"src/myfile.h": "my2header2!"})
+ git_add_changes_commit(folder=folder)
+
+ # use another fresh client
+ c2 = TestClient(servers=c.servers)
+ c2.run("install pkg/0.1@ --build=pkg")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c2.out
+ assert "pkg/0.1: MYFILE: myheader!" in c2.out
+
+ # local flow
+ c.run("install .")
+ c.run("build .")
+ assert "conanfile.py (pkg/0.1): MYCMAKE-BUILD: mycmake" in c.out
+ assert "conanfile.py (pkg/0.1): MYFILE-BUILD: myheader!" in c.out
+
+ def test_branch_flow(self):
+ """ Testing that when a user creates a branch, and pushes a commit,
+ the package can still be built from sources, and get_url_and_commit() captures the
+ remote URL and not the local
+ """
+ url = git_create_bare_repo()
+ c = TestClient(default_server_user=True)
+ c.run_command('git clone "{}" .'.format(url))
+ c.save({"conanfile.py": self.conanfile,
+ "src/myfile.h": "myheader!",
+ "CMakeLists.txt": "mycmake"})
+ c.run_command("git checkout -b mybranch")
+ git_add_changes_commit(folder=c.current_folder)
+ c.run_command("git push --set-upstream origin mybranch")
+ c.run("create .")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c.out
+ assert "pkg/0.1: MYFILE: myheader!" in c.out
+ c.run("upload * --all -c")
+ rmdir(c.current_folder) # Remove current folder to make sure things are not used from here
+
+ # use another fresh client
+ c2 = TestClient(servers=c.servers)
+ c2.run("install pkg/0.1@ --build=pkg")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c2.out
+ assert "pkg/0.1: MYFILE: myheader!" in c2.out
+
+
+@pytest.mark.skipif(six.PY2, reason="Only Py3")
+class TestGitBasicSCMFlowSubfolder:
+ """ Same as above, but conanfile.py put in "conan" subfolder in the root
+ """
+ conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.scm import Git
+ from conan.tools.files import load, update_conandata
+
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+
+ def export(self):
+ git = Git(self, os.path.dirname(self.recipe_folder)) # PARENT!
+ scm_url, scm_commit = git.get_url_and_commit()
+ update_conandata(self, {"sources": {"commit": scm_commit, "url": scm_url}})
+
+ def layout(self):
+ self.folders.root = ".."
+ self.folders.source = "."
+
+ def source(self):
+ git = Git(self)
+ sources = self.conan_data["sources"]
+ git.clone(url=sources["url"], target=".")
+ git.checkout(commit=sources["commit"])
+ self.output.info("MYCMAKE: {}".format(load(self, "CMakeLists.txt")))
+ self.output.info("MYFILE: {}".format(load(self, "src/myfile.h")))
+
+ def build(self):
+ cmake = os.path.join(self.source_folder, "CMakeLists.txt")
+ file_h = os.path.join(self.source_folder, "src/myfile.h")
+ self.output.info("MYCMAKE-BUILD: {}".format(load(self, cmake)))
+ self.output.info("MYFILE-BUILD: {}".format(load(self, file_h)))
+ """)
+
+ def test_full_scm(self):
+ folder = os.path.join(temp_folder(), "myrepo")
+ url, commit = create_local_git_repo(files={"conan/conanfile.py": self.conanfile,
+ "src/myfile.h": "myheader!",
+ "CMakeLists.txt": "mycmake"}, folder=folder)
+
+ c = TestClient(default_server_user=True)
+ c.run_command('git clone "{}" .'.format(url))
+ c.run("create conan")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c.out
+ assert "pkg/0.1: MYFILE: myheader!" in c.out
+ c.run("upload * --all -c")
+
+ # Do a change and commit, this commit will not be used by package
+ save_files(path=folder, files={"src/myfile.h": "my2header2!"})
+ git_add_changes_commit(folder=folder)
+
+ # use another fresh client
+ c2 = TestClient(servers=c.servers)
+ c2.run("install pkg/0.1@ --build=pkg")
+ assert "pkg/0.1: MYCMAKE: mycmake" in c2.out
+ assert "pkg/0.1: MYFILE: myheader!" in c2.out
+
+ # local flow
+ c.run("install conan")
+ c.run("build conan")
+ assert "conanfile.py (pkg/0.1): MYCMAKE-BUILD: mycmake" in c.out
+ assert "conanfile.py (pkg/0.1): MYFILE-BUILD: myheader!" in c.out
+
+
+@pytest.mark.skipif(six.PY2, reason="Only Py3")
+class TestGitMonorepoSCMFlow:
+ """ Build the full new SCM approach:
+ Same as above but with a monorepo with multiple subprojects
+ """
+ # TODO: swap_child_folder() not documented, not public usage
+ conanfile = textwrap.dedent("""
+ import os, shutil
+ from conan import ConanFile
+ from conan.tools.scm import Git
+ from conan.tools.files import load, update_conandata
+ from conan.tools.files.files import swap_child_folder
+
+ class Pkg(ConanFile):
+ name = "{pkg}"
+ version = "0.1"
+
+ {requires}
+
+ def export(self):
+ git = Git(self, self.recipe_folder)
+ scm_url, scm_commit = git.get_url_and_commit()
+ self.output.info("CAPTURING COMMIT: {{}}!!!".format(scm_commit))
+ folder = os.path.basename(self.recipe_folder)
+ update_conandata(self, {{"sources": {{"commit": scm_commit, "url": scm_url,
+ "folder": folder}}}})
+
+ def layout(self):
+ self.folders.source = "."
+
+ def source(self):
+ git = Git(self)
+ sources = self.conan_data["sources"]
+ git.clone(url=sources["url"], target=".")
+ git.checkout(commit=sources["commit"])
+ swap_child_folder(self.source_folder, sources["folder"])
+
+ def build(self):
+ cmake = os.path.join(self.source_folder, "CMakeLists.txt")
+ file_h = os.path.join(self.source_folder, "src/myfile.h")
+ self.output.info("MYCMAKE-BUILD: {{}}".format(load(self, cmake)))
+ self.output.info("MYFILE-BUILD: {{}}".format(load(self, file_h)))
+ """)
+
+ def test_full_scm(self):
+ folder = os.path.join(temp_folder(), "myrepo")
+ conanfile1 = self.conanfile.format(pkg="pkg1", requires="")
+ conanfile2 = self.conanfile.format(pkg="pkg2", requires="requires = 'pkg1/0.1'")
+ url, commit = create_local_git_repo(files={"sub1/conanfile.py": conanfile1,
+ "sub1/src/myfile.h": "myheader1!",
+ "sub1/CMakeLists.txt": "mycmake1!",
+ "sub2/conanfile.py": conanfile2,
+ "sub2/src/myfile.h": "myheader2!",
+ "sub2/CMakeLists.txt": "mycmake2!"
+ },
+ folder=folder)
+
+ c = TestClient(default_server_user=True)
+ c.run_command('git clone "{}" .'.format(url))
+ c.run("create sub1")
+ commit = re.search(r"CAPTURING COMMIT: (\S+)!!!", str(c.out)).group(1)
+ assert "pkg1/0.1: MYCMAKE-BUILD: mycmake1!" in c.out
+ assert "pkg1/0.1: MYFILE-BUILD: myheader1!" in c.out
+
+ c.save({"sub2/src/myfile.h": "my2header!"})
+ git_add_changes_commit(folder=c.current_folder)
+ c.run("create sub2")
+ assert "pkg2/0.1: MYCMAKE-BUILD: mycmake2!" in c.out
+ assert "pkg2/0.1: MYFILE-BUILD: my2header!" in c.out
+
+ # Exporting again sub1, gives us exactly the same revision as before
+ c.run("export sub1")
+ assert "CAPTURING COMMIT: {}".format(commit) in c.out
+ c.run("upload * --all -c -r=default")
+
+ # use another fresh client
+ c2 = TestClient(servers=c.servers)
+ c2.run("install pkg2/0.1@ --build")
+ assert "pkg1/0.1: Checkout: {}".format(commit) in c2.out
+ assert "pkg1/0.1: MYCMAKE-BUILD: mycmake1!" in c2.out
+ assert "pkg1/0.1: MYFILE-BUILD: myheader1!" in c2.out
+ assert "pkg2/0.1: MYCMAKE-BUILD: mycmake2!" in c2.out
+ assert "pkg2/0.1: MYFILE-BUILD: my2header!" in c2.out
diff --git a/conans/test/utils/scm.py b/conans/test/utils/scm.py
index 99ca9faf4b4..6802fe9a1f0 100644
--- a/conans/test/utils/scm.py
+++ b/conans/test/utils/scm.py
@@ -15,6 +15,17 @@
from conans.util.runners import check_output_runner
+def git_create_bare_repo(folder=None, reponame="repo.git"):
+ folder = folder or temp_folder()
+ cwd = os.getcwd()
+ try:
+ os.chdir(folder)
+ check_output_runner('git init --bare {}'.format(reponame))
+ return os.path.join(folder, reponame).replace("\\", "/")
+ finally:
+ os.chdir(cwd)
+
+
def create_local_git_repo(files=None, branch=None, submodules=None, folder=None, commits=1,
tags=None, origin_url=None):
tmp = folder or temp_folder()
@@ -23,8 +34,8 @@ def create_local_git_repo(files=None, branch=None, submodules=None, folder=None,
save_files(tmp, files)
git = Git(tmp)
git.run("init .")
- git.run('config user.email "you@example.com"')
git.run('config user.name "Your Name"')
+ git.run('config user.email "you@example.com"')
if branch:
git.run("checkout -b %s" % branch)
@@ -48,6 +59,19 @@ def create_local_git_repo(files=None, branch=None, submodules=None, folder=None,
return tmp.replace("\\", "/"), git.get_revision()
+def git_add_changes_commit(folder, msg="fix"):
+ cwd = os.getcwd()
+ try:
+ os.chdir(folder)
+ # Make sure user and email exist, otherwise it can error
+ check_output_runner('git config user.name "Your Name"')
+ check_output_runner('git config user.email "you@example.com"')
+ check_output_runner('git add . && git commit -m "{}"'.format(msg))
+ return check_output_runner("git rev-parse HEAD").strip()
+ finally:
+ os.chdir(cwd)
+
+
def create_local_svn_checkout(files, repo_url, rel_project_path=None,
commit_msg='default commit message', delete_checkout=True,
folder=None):
| [
{
"components": [
{
"doc": "replaces the current folder contents with the contents of one child folder. This\nis used in the SCM monorepo flow, when it is necessary to use one subproject subfolder\nto replace the whole cloned git repo",
"lines": [
499,
513
],
... | [
"conans/test/functional/tools/scm/test_git.py::TestGitBasicCapture::test_capture_commit_local",
"conans/test/functional/tools/scm/test_git.py::TestGitBasicCapture::test_capture_remote_url",
"conans/test/functional/tools/scm/test_git.py::TestGitBasicCapture::test_capture_remote_pushed_commit",
"conans/test/fun... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
introducing modern conan.tool.scm.Git helper
Changelog: Feature: Introduce new ``conan.tools.scm.Git`` helper, for direct use in ``export()`` method to capture git url and commit, and to be used in ``source()`` method to clone and checkout a git repo.
Docs: https://github.com/conan-io/docs/pull/2419
This is a first iteration, containing only the capturing code that enables the old ``scm`` flow:
- Very flat design, no inheritance, everything explicit.
- Only the minimum methods to implement ``scm`` old flow
- Powerful flexibility can implement the Git monorepo case
AFTER https://github.com/conan-io/conan/pull/10654
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/files/files.py]
(definition of swap_child_folder:)
def swap_child_folder(parent_folder, child_folder):
"""replaces the current folder contents with the contents of one child folder. This
is used in the SCM monorepo flow, when it is necessary to use one subproject subfolder
to replace the whole cloned git repo"""
[end of new definitions in conan/tools/files/files.py]
[start of new definitions in conan/tools/scm/git.py]
(definition of Git:)
class Git(object):
(definition of Git.__init__:)
def __init__(self, conanfile, folder="."):
(definition of Git._run:)
def _run(self, cmd):
(definition of Git.get_commit:)
def get_commit(self):
(definition of Git.get_remote_url:)
def get_remote_url(self, remote="origin"):
(definition of Git.commit_in_remote:)
def commit_in_remote(self, commit, remote="origin"):
(definition of Git.is_dirty:)
def is_dirty(self):
(definition of Git.get_url_and_commit:)
def get_url_and_commit(self, remote="origin"):
(definition of Git.get_repo_root:)
def get_repo_root(self):
(definition of Git.clone:)
def clone(self, url, target=""):
(definition of Git.checkout:)
def checkout(self, commit):
[end of new definitions in conan/tools/scm/git.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
conan-io__conan-10586 | 10,586 | conan-io/conan | null | 7b004e4dc5477f5d27f8a267a3ff4e5dfd482187 | 2022-02-15T16:00:06Z | diff --git a/conan/tools/files/__init__.py b/conan/tools/files/__init__.py
index 64500e1ef08..fcffac80e2e 100644
--- a/conan/tools/files/__init__.py
+++ b/conan/tools/files/__init__.py
@@ -6,3 +6,4 @@
from conan.tools.files.packager import AutoPackager
from conan.tools.files.symlinks import symlinks
from conan.tools.files.copy_pattern import copy
+from conan.tools.files.conandata import update_conandata
diff --git a/conan/tools/files/conandata.py b/conan/tools/files/conandata.py
new file mode 100644
index 00000000000..3d32a25c4da
--- /dev/null
+++ b/conan/tools/files/conandata.py
@@ -0,0 +1,30 @@
+import os
+
+import yaml
+
+from conans.errors import ConanException
+from conans.util.files import load, save
+
+
+def update_conandata(conanfile, data):
+ """
+ this only works for updating the conandata on the export() method, it seems it would
+ be plain wrong to try to change it anywhere else
+ """
+ if not hasattr(conanfile, "export_folder") or conanfile.export_folder is None:
+ raise ConanException("The 'update_conandata()' can only be used in the 'export()' method")
+ path = os.path.join(conanfile.export_folder, "conandata.yml")
+ conandata = load(path)
+ conandata = yaml.safe_load(conandata)
+
+ def recursive_dict_update(d, u):
+ for k, v in u.items():
+ if isinstance(v, dict):
+ d[k] = recursive_dict_update(d.get(k, {}), v)
+ else:
+ d[k] = v
+ return d
+
+ recursive_dict_update(conandata, data)
+ new_content = yaml.safe_dump(conandata)
+ save(path, new_content)
| diff --git a/conans/test/integration/conanfile/conan_data_test.py b/conans/test/integration/conanfile/conan_data_test.py
index 28a60476d65..2511b52059b 100644
--- a/conans/test/integration/conanfile/conan_data_test.py
+++ b/conans/test/integration/conanfile/conan_data_test.py
@@ -255,3 +255,59 @@ def package(self):
self.assertIn("My URL: this url", client.out)
client.run("export-pkg . name/version@ -sf tmp/source -if tmp/install -bf tmp/build")
self.assertIn("My URL: this url", client.out)
+
+
+def test_conandata_update():
+ """ test the update_conandata() helper
+ """
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.files import update_conandata
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ def export(self):
+ update_conandata(self, {"sources": {"0.1": {"commit": 123, "type": "git"},
+ "0.2": {"url": "new"}
+ }
+ })
+
+ def source(self):
+ data = self.conan_data["sources"]
+ self.output.info("0.1-commit: {}!!".format(data["0.1"]["commit"]))
+ self.output.info("0.1-type: {}!!".format(data["0.1"]["type"]))
+ self.output.info("0.1-url: {}!!".format(data["0.1"]["url"]))
+ self.output.info("0.2-url: {}!!".format(data["0.2"]["url"]))
+ """)
+ conandata = textwrap.dedent("""\
+ sources:
+ "0.1":
+ url: myurl
+ commit: 234
+ """)
+ c.save({"conanfile.py": conanfile,
+ "conandata.yml": conandata})
+ c.run("create .")
+ assert "pkg/0.1: 0.1-commit: 123!!" in c.out
+ assert "pkg/0.1: 0.1-type: git!!" in c.out
+ assert "pkg/0.1: 0.1-url: myurl!!" in c.out
+ assert "pkg/0.1: 0.2-url: new!!" in c.out
+
+
+def test_conandata_update_error():
+ """ test the update_conandata() helper fails if used outside export()
+ """
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.tools.files import update_conandata
+ class Pkg(ConanFile):
+ name = "pkg"
+ version = "0.1"
+ def source(self):
+ update_conandata(self, {})
+ """)
+ c.save({"conanfile.py": conanfile})
+ c.run("create .", assert_error=True)
+ assert "The 'update_conandata()' can only be used in the 'export()' method" in c.out
| [
{
"components": [
{
"doc": "this only works for updating the conandata on the export() method, it seems it would\nbe plain wrong to try to change it anywhere else",
"lines": [
9,
30
],
"name": "update_conandata",
"signature": "def update_conandat... | [
"conans/test/integration/conanfile/conan_data_test.py::test_conandata_update",
"conans/test/integration/conanfile/conan_data_test.py::test_conandata_update_error"
] | [
"conans/test/integration/conanfile/conan_data_test.py::ConanDataTest::test_conan_data_as_source",
"conans/test/integration/conanfile/conan_data_test.py::ConanDataTest::test_conan_data_as_source_newtools",
"conans/test/integration/conanfile/conan_data_test.py::ConanDataTest::test_conan_data_development_flow",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
new update_conandata() helper
Changelog: Feature: New ``from conan.tools.files import update_conandata()`` helper to add data to ``conandata.yml`` in the ``export()`` method.
Docs: https://github.com/conan-io/docs/pull/2422
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/files/conandata.py]
(definition of update_conandata:)
def update_conandata(conanfile, data):
"""this only works for updating the conandata on the export() method, it seems it would
be plain wrong to try to change it anywhere else"""
(definition of update_conandata.recursive_dict_update:)
def recursive_dict_update(d, u):
[end of new definitions in conan/tools/files/conandata.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
matplotlib__matplotlib-22452 | 22,452 | matplotlib/matplotlib | 3.5 | 3eadeacc06c9f2ddcdac6ae39819faa9fbee9e39 | 2022-02-11T20:21:50Z | diff --git a/lib/matplotlib/figure.py b/lib/matplotlib/figure.py
index f8aa99d48a09..441ec703d9cc 100644
--- a/lib/matplotlib/figure.py
+++ b/lib/matplotlib/figure.py
@@ -35,8 +35,10 @@
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.gridspec import GridSpec
-from matplotlib.layout_engine import (ConstrainedLayoutEngine,
- TightLayoutEngine, LayoutEngine)
+from matplotlib.layout_engine import (
+ ConstrainedLayoutEngine, TightLayoutEngine, LayoutEngine,
+ PlaceHolderLayoutEngine
+)
import matplotlib.legend as mlegend
from matplotlib.patches import Rectangle
from matplotlib.text import Text
@@ -2382,7 +2384,9 @@ def _check_layout_engines_compat(self, old, new):
If the figure has used the old engine and added a colorbar then the
value of colorbar_gridspec must be the same on the new engine.
"""
- if old is None or old.colorbar_gridspec == new.colorbar_gridspec:
+ if old is None or new is None:
+ return True
+ if old.colorbar_gridspec == new.colorbar_gridspec:
return True
# colorbar layout different, so check if any colorbars are on the
# figure...
@@ -2398,15 +2402,29 @@ def set_layout_engine(self, layout=None, **kwargs):
Parameters
----------
- layout: {'constrained', 'compressed', 'tight'} or `~.LayoutEngine`
- 'constrained' will use `~.ConstrainedLayoutEngine`,
- 'compressed' will also use ConstrainedLayoutEngine, but with a
- correction that attempts to make a good layout for fixed-aspect
- ratio Axes. 'tight' uses `~.TightLayoutEngine`. Users and
- libraries can define their own layout engines as well.
+ layout: {'constrained', 'compressed', 'tight', 'none'} or \
+`LayoutEngine` or None
+
+ - 'constrained' will use `~.ConstrainedLayoutEngine`
+ - 'compressed' will also use `~.ConstrainedLayoutEngine`, but with
+ a correction that attempts to make a good layout for fixed-aspect
+ ratio Axes.
+ - 'tight' uses `~.TightLayoutEngine`
+ - 'none' removes layout engine.
+
+ If `None`, the behavior is controlled by :rc:`figure.autolayout`
+ (which if `True` behaves as if 'tight' were passed) and
+ :rc:`figure.constrained_layout.use` (which if `True` behaves as if
+ 'constrained' were passed). If both are `True`,
+ :rc:`figure.autolayout` takes priority.
+
+ Users and libraries can define their own layout engines and pass
+ the instance directly as well.
+
kwargs: dict
The keyword arguments are passed to the layout engine to set things
like padding and margin sizes. Only used if *layout* is a string.
+
"""
if layout is None:
if mpl.rcParams['figure.autolayout']:
@@ -2423,6 +2441,14 @@ def set_layout_engine(self, layout=None, **kwargs):
elif layout == 'compressed':
new_layout_engine = ConstrainedLayoutEngine(compress=True,
**kwargs)
+ elif layout == 'none':
+ if self._layout_engine is not None:
+ new_layout_engine = PlaceHolderLayoutEngine(
+ self._layout_engine.adjust_compatible,
+ self._layout_engine.colorbar_gridspec
+ )
+ else:
+ new_layout_engine = None
elif isinstance(layout, LayoutEngine):
new_layout_engine = layout
else:
diff --git a/lib/matplotlib/layout_engine.py b/lib/matplotlib/layout_engine.py
index e0b058e601dd..ee04afef19c7 100644
--- a/lib/matplotlib/layout_engine.py
+++ b/lib/matplotlib/layout_engine.py
@@ -100,6 +100,30 @@ def execute(self, fig):
raise NotImplementedError
+class PlaceHolderLayoutEngine(LayoutEngine):
+ """
+ This layout engine does not adjust the figure layout at all.
+
+ The purpose of this `.LayoutEngine` is to act as a place holder when the
+ user removes a layout engine to ensure an incompatible `.LayoutEngine` can
+ not be set later.
+
+ Parameters
+ ----------
+ adjust_compatible, colorbar_gridspec : bool
+ Allow the PlaceHolderLayoutEngine to mirror the behavior of whatever
+ layout engine it is replacing.
+
+ """
+ def __init__(self, adjust_compatible, colorbar_gridspec, **kwargs):
+ self._adjust_compatible = adjust_compatible
+ self._colorbar_gridspec = colorbar_gridspec
+ super().__init__(**kwargs)
+
+ def execute(self, fig):
+ return
+
+
class TightLayoutEngine(LayoutEngine):
"""
Implements the ``tight_layout`` geometry management. See
| diff --git a/lib/matplotlib/tests/test_figure.py b/lib/matplotlib/tests/test_figure.py
index ec1a814ebc59..b2da459d6cf9 100644
--- a/lib/matplotlib/tests/test_figure.py
+++ b/lib/matplotlib/tests/test_figure.py
@@ -17,7 +17,8 @@
from matplotlib.axes import Axes
from matplotlib.figure import Figure, FigureBase
from matplotlib.layout_engine import (ConstrainedLayoutEngine,
- TightLayoutEngine)
+ TightLayoutEngine,
+ PlaceHolderLayoutEngine)
from matplotlib.ticker import AutoMinorLocator, FixedFormatter, ScalarFormatter
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
@@ -578,6 +579,9 @@ def test_invalid_layouts():
fig, ax = plt.subplots(layout="constrained")
pc = ax.pcolormesh(np.random.randn(2, 2))
fig.colorbar(pc)
+ with pytest.raises(RuntimeError, match='Colorbar layout of new layout'):
+ fig.set_layout_engine("tight")
+ fig.set_layout_engine("none")
with pytest.raises(RuntimeError, match='Colorbar layout of new layout'):
fig.set_layout_engine("tight")
@@ -586,6 +590,11 @@ def test_invalid_layouts():
fig.colorbar(pc)
with pytest.raises(RuntimeError, match='Colorbar layout of new layout'):
fig.set_layout_engine("constrained")
+ fig.set_layout_engine("none")
+ assert isinstance(fig.get_layout_engine(), PlaceHolderLayoutEngine)
+
+ with pytest.raises(RuntimeError, match='Colorbar layout of new layout'):
+ fig.set_layout_engine("constrained")
@check_figures_equal(extensions=["png", "pdf"])
| [
{
"components": [
{
"doc": "This layout engine does not adjust the figure layout at all.\n\nThe purpose of this `.LayoutEngine` is to act as a place holder when the\nuser removes a layout engine to ensure an incompatible `.LayoutEngine` can\nnot be set later.\n\nParameters\n----------\nadjust_comp... | [
"lib/matplotlib/tests/test_figure.py::test_align_labels[png]",
"lib/matplotlib/tests/test_figure.py::test_align_labels_stray_axes",
"lib/matplotlib/tests/test_figure.py::test_figure_label",
"lib/matplotlib/tests/test_figure.py::test_fignum_exists",
"lib/matplotlib/tests/test_figure.py::test_clf_keyword",
... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH: add ability to remove layout engine
## PR Summary
This may be too simplistic as it just sets it to None which gives you ability
to "go through zero" and change to an incompatible layout engine.
## PR Checklist
<!-- Please mark any checkboxes that do not apply to this PR as [N/A]. -->
**Tests and Styling**
- [x] Has pytest style unit tests (and `pytest` passes).
- [x] Is [Flake 8](https://flake8.pycqa.org/en/latest/) compliant (install `flake8-docstrings` and run `flake8 --docstring-convention=all`).
**Documentation**
- [x] New features are documented, with examples if plot related.
- [N/A] New features have an entry in `doc/users/next_whats_new/` (follow instructions in README.rst there).
- [N/A] API changes documented in `doc/api/next_api_changes/` (follow instructions in README.rst there).
- [x] Documentation is sphinx and numpydoc compliant (the docs should [build](https://matplotlib.org/devel/documenting_mpl.html#building-the-docs) without error).
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in lib/matplotlib/layout_engine.py]
(definition of PlaceHolderLayoutEngine:)
class PlaceHolderLayoutEngine(LayoutEngine):
"""This layout engine does not adjust the figure layout at all.
The purpose of this `.LayoutEngine` is to act as a place holder when the
user removes a layout engine to ensure an incompatible `.LayoutEngine` can
not be set later.
Parameters
----------
adjust_compatible, colorbar_gridspec : bool
Allow the PlaceHolderLayoutEngine to mirror the behavior of whatever
layout engine it is replacing."""
(definition of PlaceHolderLayoutEngine.__init__:)
def __init__(self, adjust_compatible, colorbar_gridspec, **kwargs):
(definition of PlaceHolderLayoutEngine.execute:)
def execute(self, fig):
[end of new definitions in lib/matplotlib/layout_engine.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3d6c3da884fafae4654df68144391cfe9be6f134 | ||
prompt-toolkit__python-prompt-toolkit-1566 | 1,566 | prompt-toolkit/python-prompt-toolkit | null | 700ae9eed535090c84ffcde7003fd3c295b7b1ad | 2022-02-10T16:17:55Z | diff --git a/prompt_toolkit/formatted_text/html.py b/prompt_toolkit/formatted_text/html.py
index 06c6020f5..9a5213227 100644
--- a/prompt_toolkit/formatted_text/html.py
+++ b/prompt_toolkit/formatted_text/html.py
@@ -1,4 +1,5 @@
import xml.dom.minidom as minidom
+from string import Formatter
from typing import Any, List, Tuple, Union
from .base import FormattedText, StyleAndTextTuples
@@ -107,11 +108,7 @@ def format(self, *args: object, **kwargs: object) -> "HTML":
Like `str.format`, but make sure that the arguments are properly
escaped.
"""
- # Escape all the arguments.
- escaped_args = [html_escape(a) for a in args]
- escaped_kwargs = {k: html_escape(v) for k, v in kwargs.items()}
-
- return HTML(self.value.format(*escaped_args, **escaped_kwargs))
+ return HTML(FORMATTER.vformat(self.value, args, kwargs))
def __mod__(self, value: Union[object, Tuple[object, ...]]) -> "HTML":
"""
@@ -124,6 +121,11 @@ def __mod__(self, value: Union[object, Tuple[object, ...]]) -> "HTML":
return HTML(self.value % value)
+class HTMLFormatter(Formatter):
+ def format_field(self, value: object, format_spec: str) -> str:
+ return html_escape(format(value, format_spec))
+
+
def html_escape(text: object) -> str:
# The string interpolation functions also take integers and other types.
# Convert to string first.
@@ -136,3 +138,6 @@ def html_escape(text: object) -> str:
.replace(">", ">")
.replace('"', """)
)
+
+
+FORMATTER = HTMLFormatter()
| diff --git a/tests/test_formatted_text.py b/tests/test_formatted_text.py
index 23f5ccfd5..a49c67202 100644
--- a/tests/test_formatted_text.py
+++ b/tests/test_formatted_text.py
@@ -141,6 +141,9 @@ def test_html_interpolation():
value = HTML("<b>{a}</b><u>{b}</u>").format(a="hello", b="world")
assert to_formatted_text(value) == [("class:b", "hello"), ("class:u", "world")]
+ value = HTML("<b>{:02d}</b><u>{:.3f}</u>").format(3, 3.14159)
+ assert to_formatted_text(value) == [("class:b", "03"), ("class:u", "3.142")]
+
def test_merge_formatted_text():
html1 = HTML("<u>hello</u>")
| [
{
"components": [
{
"doc": "",
"lines": [
124,
126
],
"name": "HTMLFormatter",
"signature": "class HTMLFormatter(Formatter):",
"type": "class"
},
{
"doc": "",
"lines": [
125,
126
],
... | [
"tests/test_formatted_text.py::test_html_interpolation"
] | [
"tests/test_formatted_text.py::test_basic_html",
"tests/test_formatted_text.py::test_html_with_fg_bg",
"tests/test_formatted_text.py::test_ansi_formatting",
"tests/test_formatted_text.py::test_ansi_256_color",
"tests/test_formatted_text.py::test_ansi_true_color",
"tests/test_formatted_text.py::test_interp... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
HTML.format specifiers
html_escape parameters after formatting, not before
Allows things like HTML("<b>{.3f}</b>").format(3.14159)
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in prompt_toolkit/formatted_text/html.py]
(definition of HTMLFormatter:)
class HTMLFormatter(Formatter):
(definition of HTMLFormatter.format_field:)
def format_field(self, value: object, format_spec: str) -> str:
[end of new definitions in prompt_toolkit/formatted_text/html.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 700ae9eed535090c84ffcde7003fd3c295b7b1ad | ||
Textualize__rich-1941 | 1,941 | Textualize/rich | null | 83756d624022d88082cf08ad49613d15b86ced21 | 2022-02-09T11:41:58Z | diff --git a/CHANGELOG.md b/CHANGELOG.md
index fcf58cd7a1..76195aedcb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased
+### Added
+
+- Added ProgressColumn `MofNCompleteColumn` to display raw `completed/total` column (similar to DownloadColumn,
+ but displays values as ints, does not convert to floats or add bit/bytes units).
+ https://github.com/Textualize/rich/pull/1941
+
### Fixed
- In Jupyter mode make the link target be set to "_blank"
diff --git a/docs/source/progress.rst b/docs/source/progress.rst
index 1a4e02ed7d..4ce3588368 100644
--- a/docs/source/progress.rst
+++ b/docs/source/progress.rst
@@ -113,12 +113,21 @@ The defaults are roughly equivalent to the following::
TimeRemainingColumn(),
)
+To create a Progress with your own columns in addition to the defaults, use :meth:`~rich.progress.Progress.get_default_columns`::
+
+ progress = Progress(
+ SpinnerColumn(),
+ *Progress.get_default_columns(),
+ TimeElapsedColumn(),
+ )
+
The following column objects are available:
- :class:`~rich.progress.BarColumn` Displays the bar.
- :class:`~rich.progress.TextColumn` Displays text.
- :class:`~rich.progress.TimeElapsedColumn` Displays the time elapsed.
- :class:`~rich.progress.TimeRemainingColumn` Displays the estimated time remaining.
+- :class:`~rich.progress.MofNCompleteColumn` Displays completion progress as ``"{task.completed}/{task.total}"`` (works best if completed and total are ints).
- :class:`~rich.progress.FileSizeColumn` Displays progress as file size (assumes the steps are bytes).
- :class:`~rich.progress.TotalFileSizeColumn` Displays total file size (assumes the steps are bytes).
- :class:`~rich.progress.DownloadColumn` Displays download progress (assumes the steps are bytes).
@@ -128,6 +137,7 @@ The following column objects are available:
To implement your own columns, extend the :class:`~rich.progress.ProgressColumn` class and use it as you would the other columns.
+
Table Columns
~~~~~~~~~~~~~
diff --git a/rich/progress.py b/rich/progress.py
index 115ae81608..bbbdf70ef7 100644
--- a/rich/progress.py
+++ b/rich/progress.py
@@ -375,6 +375,33 @@ def render(self, task: "Task") -> Text:
return Text(data_size, style="progress.filesize.total")
+class MofNCompleteColumn(ProgressColumn):
+ """Renders completed count/total, e.g. ' 10/1000'.
+
+ Best for bounded tasks with int quantities.
+
+ Space pads the completed count so that progress length does not change as task progresses
+ past powers of 10.
+
+ Args:
+ separator (str, optional): Text to separate completed and total values. Defaults to "/".
+ """
+
+ def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
+ self.separator = separator
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> Text:
+ """Show completed/total."""
+ completed = int(task.completed)
+ total = int(task.total)
+ total_width = len(str(total))
+ return Text(
+ f"{completed:{total_width}d}{self.separator}{total}",
+ style="progress.download",
+ )
+
+
class DownloadColumn(ProgressColumn):
"""Renders file size downloaded and total, e.g. '0.5/2.3 GB'.
| diff --git a/tests/test_progress.py b/tests/test_progress.py
index 20b9d32ed4..2dd53ccd2c 100644
--- a/tests/test_progress.py
+++ b/tests/test_progress.py
@@ -16,6 +16,7 @@
TransferSpeedColumn,
RenderableColumn,
SpinnerColumn,
+ MofNCompleteColumn,
Progress,
Task,
TextColumn,
@@ -311,6 +312,8 @@ def test_columns() -> None:
TotalFileSizeColumn(),
DownloadColumn(),
TransferSpeedColumn(),
+ MofNCompleteColumn(),
+ MofNCompleteColumn(separator=" of "),
transient=True,
console=console,
auto_refresh=False,
@@ -330,7 +333,8 @@ def test_columns() -> None:
result = replace_link_ids(console.file.getvalue())
print(repr(result))
- expected = "\x1b[?25ltest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:16\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kfoo\ntest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:16\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m\r\x1b[2K\x1b[1A\x1b[2K\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0mhello \ntest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:16\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kworld\ntest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:16\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m\r\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:30\x1b[0m \x1b[32m12 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m12/10 bytes\x1b[0m \x1b[31m1 byte/s \x1b[0m\ntest bar \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m 
\x1b[33m0:00:25\x1b[0m \x1b[32m16 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m16/7 bytes \x1b[0m \x1b[31m2 bytes/s\x1b[0m\r\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:30\x1b[0m \x1b[32m12 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m12/10 bytes\x1b[0m \x1b[31m1 byte/s \x1b[0m\ntest bar \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:25\x1b[0m \x1b[32m16 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m16/7 bytes \x1b[0m \x1b[31m2 bytes/s\x1b[0m\n\x1b[?25h\r\x1b[1A\x1b[2K\x1b[1A\x1b[2K"
+ expected = "\x1b[?25ltest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kfoo\ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2K\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0mhello \ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kworld\ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━\x1b[0m 
\x1b[36m0:00:00\x1b[0m \x1b[33m0:00:34\x1b[0m \x1b[32m12 \x1b[0m \x1b[32m10 \x1b[0m \x1b[32m12/10 \x1b[0m \x1b[31m1 \x1b[0m \x1b[32m12/10\x1b[0m \x1b[32m12 of 10\x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbyte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:29\x1b[0m \x1b[32m16 \x1b[0m \x1b[32m7 bytes\x1b[0m \x1b[32m16/7 \x1b[0m \x1b[31m2 \x1b[0m \x1b[32m16/7 \x1b[0m \x1b[32m16 of 7 \x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbytes/s\x1b[0m \r\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:34\x1b[0m \x1b[32m12 \x1b[0m \x1b[32m10 \x1b[0m \x1b[32m12/10 \x1b[0m \x1b[31m1 \x1b[0m \x1b[32m12/10\x1b[0m \x1b[32m12 of 10\x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbyte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:29\x1b[0m \x1b[32m16 \x1b[0m \x1b[32m7 bytes\x1b[0m \x1b[32m16/7 \x1b[0m \x1b[31m2 \x1b[0m \x1b[32m16/7 \x1b[0m \x1b[32m16 of 7 \x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbytes/s\x1b[0m \n\x1b[?25h\r\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K"
+
assert result == expected
| diff --git a/CHANGELOG.md b/CHANGELOG.md
index fcf58cd7a1..76195aedcb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased
+### Added
+
+- Added ProgressColumn `MofNCompleteColumn` to display raw `completed/total` column (similar to DownloadColumn,
+ but displays values as ints, does not convert to floats or add bit/bytes units).
+ https://github.com/Textualize/rich/pull/1941
+
### Fixed
- In Jupyter mode make the link target be set to "_blank"
diff --git a/docs/source/progress.rst b/docs/source/progress.rst
index 1a4e02ed7d..4ce3588368 100644
--- a/docs/source/progress.rst
+++ b/docs/source/progress.rst
@@ -113,12 +113,21 @@ The defaults are roughly equivalent to the following::
TimeRemainingColumn(),
)
+To create a Progress with your own columns in addition to the defaults, use :meth:`~rich.progress.Progress.get_default_columns`::
+
+ progress = Progress(
+ SpinnerColumn(),
+ *Progress.get_default_columns(),
+ TimeElapsedColumn(),
+ )
+
The following column objects are available:
- :class:`~rich.progress.BarColumn` Displays the bar.
- :class:`~rich.progress.TextColumn` Displays text.
- :class:`~rich.progress.TimeElapsedColumn` Displays the time elapsed.
- :class:`~rich.progress.TimeRemainingColumn` Displays the estimated time remaining.
+- :class:`~rich.progress.MofNCompleteColumn` Displays completion progress as ``"{task.completed}/{task.total}"`` (works best if completed and total are ints).
- :class:`~rich.progress.FileSizeColumn` Displays progress as file size (assumes the steps are bytes).
- :class:`~rich.progress.TotalFileSizeColumn` Displays total file size (assumes the steps are bytes).
- :class:`~rich.progress.DownloadColumn` Displays download progress (assumes the steps are bytes).
@@ -128,6 +137,7 @@ The following column objects are available:
To implement your own columns, extend the :class:`~rich.progress.ProgressColumn` class and use it as you would the other columns.
+
Table Columns
~~~~~~~~~~~~~
| [
{
"components": [
{
"doc": "Renders completed count/total, e.g. ' 10/1000'.\n\nBest for bounded tasks with int quantities.\n\nSpace pads the completed count so that progress length does not change as task progresses\npast powers of 10.\n\nArgs:\n separator (str, optional): Text to separate com... | [
"tests/test_progress.py::test_bar_columns",
"tests/test_progress.py::test_text_column",
"tests/test_progress.py::test_time_elapsed_column",
"tests/test_progress.py::test_time_remaining_column",
"tests/test_progress.py::test_renderable_column",
"tests/test_progress.py::test_spinner_column",
"tests/test_p... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add MofNCompleteColumn ProgressColumn type
## Type of changes
- [ ] Bug fix
- [X] New feature
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [X] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [X] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [X] I've added tests for new code.
- [X] I accept that @willmcgugan may be pedantic in the code review.
## Description
Added ProgressColumn `MofNCompleteColumn` to display raw `completed/total` column (similar to DownloadColumn, but displays values as ints, does not convert to floats or add bit/bytes units).
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/progress.py]
(definition of MofNCompleteColumn:)
class MofNCompleteColumn(ProgressColumn):
"""Renders completed count/total, e.g. ' 10/1000'.
Best for bounded tasks with int quantities.
Space pads the completed count so that progress length does not change as task progresses
past powers of 10.
Args:
separator (str, optional): Text to separate completed and total values. Defaults to "/"."""
(definition of MofNCompleteColumn.__init__:)
def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
(definition of MofNCompleteColumn.render:)
def render(self, task: "Task") -> Text:
"""Show completed/total."""
[end of new definitions in rich/progress.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | |
joke2k__faker-1610 | 1,610 | joke2k/faker | null | fc127fa65b7d16817b1fcea6f4186ef417ed4fcc | 2022-02-08T21:58:20Z | diff --git a/faker/providers/lorem/de_AT/__init__.py b/faker/providers/lorem/de_AT/__init__.py
new file mode 100644
index 0000000000..c52b5b7d73
--- /dev/null
+++ b/faker/providers/lorem/de_AT/__init__.py
@@ -0,0 +1,9 @@
+from ..de_DE import Provider as GermanProvider
+
+
+class Provider(GermanProvider):
+ """Implement lorem provider for ``de_DE`` locale.
+ Using the same as in ```de_DE```.
+ """
+
+ pass
diff --git a/faker/providers/lorem/de_DE/__init__.py b/faker/providers/lorem/de_DE/__init__.py
new file mode 100644
index 0000000000..95b22558c8
--- /dev/null
+++ b/faker/providers/lorem/de_DE/__init__.py
@@ -0,0 +1,523 @@
+from .. import Provider as LoremProvider
+
+
+class Provider(LoremProvider):
+ """Implement lorem provider for ``de_DE`` locale.
+
+ Word list is based on the source below, and some words have been removed
+ because of some duplications.
+
+ Sources:
+
+ - https://www.gut1.de/grundwortschatz/grundwortschatz-500/
+ """
+
+ word_list = (
+ "ab",
+ "Abend",
+ "aber",
+ "acht",
+ "Affe",
+ "alle",
+ "allein",
+ "als",
+ "also",
+ "alt",
+ "am",
+ "an",
+ "andere",
+ "anfangen",
+ "Angst",
+ "antworten",
+ "Apfel",
+ "Arbeit",
+ "arbeiten",
+ "Arzt",
+ "auch",
+ "auf",
+ "Auge",
+ "aus",
+ "Auto",
+ "baden",
+ "bald",
+ "Ball",
+ "bauen",
+ "Bauer",
+ "Baum",
+ "bei",
+ "beide",
+ "beim",
+ "Bein",
+ "Beispiel",
+ "beißen",
+ "bekommen",
+ "Berg",
+ "besser",
+ "Bett",
+ "Bild",
+ "bin",
+ "bis",
+ "blau",
+ "bleiben",
+ "Blume",
+ "Boden",
+ "brauchen",
+ "braun",
+ "Brief",
+ "bringen",
+ "Brot",
+ "Bruder",
+ "Buch",
+ "böse",
+ "da",
+ "dabei",
+ "dafür",
+ "damit",
+ "danach",
+ "dann",
+ "daran",
+ "darauf",
+ "darin",
+ "das",
+ "dauern",
+ "davon",
+ "dazu",
+ "dein",
+ "dem",
+ "den",
+ "denken",
+ "denn",
+ "der",
+ "deshalb",
+ "dich",
+ "dick",
+ "die",
+ "Ding",
+ "dir",
+ "doch",
+ "Dorf",
+ "dort",
+ "draußen",
+ "drehen",
+ "drei",
+ "dumm",
+ "dunkel",
+ "durch",
+ "dürfen",
+ "eigentlich",
+ "ein",
+ "einfach",
+ "einige",
+ "einigen",
+ "einmal",
+ "Eis",
+ "Eltern",
+ "Ende",
+ "endlich",
+ "er",
+ "Erde",
+ "erklären",
+ "erschrecken",
+ "erst",
+ "erzählen",
+ "es",
+ "essen",
+ "Essen",
+ "etwas",
+ "fahren",
+ "Fahrrad",
+ "fallen",
+ "Familie",
+ "fangen",
+ "fast",
+ "fehlen",
+ "Fenster",
+ "Ferien",
+ "fertig",
+ "fest",
+ "Feuer",
+ "fiel",
+ "finden",
+ "Finger",
+ "Fisch",
+ "Flasche",
+ "fliegen",
+ "Frage",
+ "fragen",
+ "Frau",
+ "frei",
+ "fressen",
+ "Freude",
+ "freuen",
+ "Freund",
+ "fröhlich",
+ "früh",
+ "früher",
+ "Fuß",
+ "Fußball",
+ "fährt",
+ "führen",
+ "fünf",
+ "für",
+ "gab",
+ "ganz",
+ "gar",
+ "Garten",
+ "geben",
+ "Geburtstag",
+ "gefährlich",
+ "gegen",
+ "gehen",
+ "gehören",
+ "gelb",
+ "Geld",
+ "genau",
+ "gerade",
+ "gern",
+ "Geschenk",
+ "Geschichte",
+ "Gesicht",
+ "gestern",
+ "gesund",
+ "gewinnen",
+ "gibt",
+ "ging",
+ "Glas",
+ "glauben",
+ "gleich",
+ "Glück",
+ "glücklich",
+ "Gott",
+ "groß",
+ "grün",
+ "gut",
+ "Haare",
+ "haben",
+ "halbe",
+ "halten",
+ "Hand",
+ "hart",
+ "Hase",
+ "hat",
+ "Haus",
+ "heiß",
+ "heißen",
+ "helfen",
+ "her",
+ "heraus",
+ "Herr",
+ "Herz",
+ "heute",
+ "hier",
+ "Hilfe",
+ "Himmel",
+ "hin",
+ "hinein",
+ "hinter",
+ "hoch",
+ "holen",
+ "Hund",
+ "Hunger",
+ "hängen",
+ "hören",
+ "ich",
+ "ihm",
+ "ihn",
+ "ihr",
+ "im",
+ "immer",
+ "in",
+ "ins",
+ "ist",
+ "ja",
+ "Jahr",
+ "jeder",
+ "jetzt",
+ "jung",
+ "Junge",
+ "kalt",
+ "kam",
+ "kann",
+ "Katze",
+ "kaufen",
+ "kein",
+ "kennen",
+ "Kind",
+ "Klasse",
+ "klein",
+ "klettern",
+ "kochen",
+ "kommen",
+ "Kopf",
+ "krank",
+ "kurz",
+ "können",
+ "Küche",
+ "lachen",
+ "Land",
+ "lange",
+ "langsam",
+ "las",
+ "lassen",
+ "laufen",
+ "laut",
+ "leben",
+ "Leben",
+ "legen",
+ "Lehrer",
+ "Lehrerin",
+ "leicht",
+ "leise",
+ "lernen",
+ "lesen",
+ "letzte",
+ "Leute",
+ "Licht",
+ "lieb",
+ "liegen",
+ "ließ",
+ "Loch",
+ "los",
+ "Luft",
+ "lustig",
+ "machen",
+ "mal",
+ "Mama",
+ "man",
+ "Mann",
+ "Maus",
+ "Meer",
+ "mehr",
+ "mein",
+ "Mensch",
+ "merken",
+ "mich",
+ "Milch",
+ "Minute",
+ "Minutenmir",
+ "mit",
+ "Monat",
+ "Monate",
+ "Musik",
+ "Mutter",
+ "Mädchen",
+ "mögen",
+ "möglich",
+ "müde",
+ "müssen",
+ "nach",
+ "Nacht",
+ "nah",
+ "Name",
+ "Nase",
+ "nass",
+ "natürlich",
+ "neben",
+ "nehmen",
+ "nein",
+ "nennen",
+ "neu",
+ "neun",
+ "nicht",
+ "nichts",
+ "nie",
+ "nimmt",
+ "noch",
+ "nun",
+ "nur",
+ "nächste",
+ "nämlich",
+ "ob",
+ "oben",
+ "oder",
+ "offen",
+ "oft",
+ "ohne",
+ "Oma",
+ "Onkel",
+ "Opa",
+ "packen",
+ "Papa",
+ "Pferd",
+ "Platz",
+ "plötzlich",
+ "Polizei",
+ "Rad",
+ "rechnen",
+ "reich",
+ "reiten",
+ "rennen",
+ "richtig",
+ "rot",
+ "rufen",
+ "ruhig",
+ "rund",
+ "Sache",
+ "sagen",
+ "schaffen",
+ "schauen",
+ "scheinen",
+ "schenken",
+ "schicken",
+ "Schiff",
+ "schlafen",
+ "schlagen",
+ "schlecht",
+ "schlimm",
+ "Schluss",
+ "Schnee",
+ "schnell",
+ "schon",
+ "schreiben",
+ "schreien",
+ "Schuh",
+ "Schule",
+ "schwarz",
+ "schwer",
+ "Schwester",
+ "schwimmen",
+ "schön",
+ "Schüler",
+ "sechs",
+ "See",
+ "sehen",
+ "sehr",
+ "sein",
+ "seit",
+ "Seite",
+ "selbst",
+ "setzen",
+ "sich",
+ "sicher",
+ "sie",
+ "sieben",
+ "sieht",
+ "sind",
+ "singen",
+ "sitzen",
+ "so",
+ "sofort",
+ "Sohn",
+ "sollen",
+ "Sommer",
+ "Sonne",
+ "Sonntag",
+ "sonst",
+ "Spaß",
+ "Spiel",
+ "spielen",
+ "sprechen",
+ "springen",
+ "spät",
+ "später",
+ "Stadt",
+ "stark",
+ "stehen",
+ "steigen",
+ "Stein",
+ "Stelle",
+ "stellen",
+ "Straße",
+ "Stunde",
+ "Stück",
+ "suchen",
+ "Tag",
+ "Tante",
+ "Teller",
+ "tief",
+ "Tier",
+ "Tisch",
+ "tot",
+ "tragen",
+ "traurig",
+ "treffen",
+ "trinken",
+ "tun",
+ "turnen",
+ "Tür",
+ "Uhr",
+ "um",
+ "und",
+ "uns",
+ "unser",
+ "unten",
+ "unter",
+ "Vater",
+ "vergessen",
+ "verkaufen",
+ "verlieren",
+ "verstecken",
+ "verstehen",
+ "versuchen",
+ "viel",
+ "vielleicht",
+ "vier",
+ "Vogel",
+ "voll",
+ "vom",
+ "von",
+ "vor",
+ "vorbei",
+ "Wagen",
+ "wahr",
+ "Wald",
+ "war",
+ "warm",
+ "warten",
+ "warum",
+ "was",
+ "waschen",
+ "Wasser",
+ "weg",
+ "Weg",
+ "Weihnachten",
+ "weil",
+ "weinen",
+ "weit",
+ "weiter",
+ "weiß",
+ "Welt",
+ "wenig",
+ "wenn",
+ "wer",
+ "werden",
+ "werfen",
+ "Wetter",
+ "wichtig",
+ "wie",
+ "wieder",
+ "Wiese",
+ "will",
+ "Winter",
+ "wir",
+ "wird",
+ "wirklich",
+ "wissen",
+ "Wissen",
+ "wo",
+ "Woche",
+ "wohl",
+ "wohnen",
+ "Wohnung",
+ "wollen",
+ "Wort",
+ "wünschen",
+ "Zahl",
+ "zehn",
+ "zeigen",
+ "Zeit",
+ "Zeitung",
+ "ziehen",
+ "Zimmer",
+ "zu",
+ "Zug",
+ "zum",
+ "zur",
+ "zurück",
+ "zusammen",
+ "zwei",
+ "zwischen",
+ "öffnen",
+ "über",
+ "überall",
+ )
| diff --git a/tests/providers/test_lorem.py b/tests/providers/test_lorem.py
index b96a14ca86..0838017e47 100644
--- a/tests/providers/test_lorem.py
+++ b/tests/providers/test_lorem.py
@@ -4,6 +4,8 @@
from faker.providers.lorem.az_AZ import Provider as AzAzLoremProvider
from faker.providers.lorem.cs_CZ import Provider as CsCzLoremProvider
+from faker.providers.lorem.de_AT import Provider as DeAtLoremProvider
+from faker.providers.lorem.de_DE import Provider as DeDeLoremProvider
class TestLoremProvider:
@@ -313,3 +315,141 @@ def test_words(self, faker, num_samples):
for _ in range(num_samples):
words = faker.words(num_words)
assert all(isinstance(word, str) and word in AzAzLoremProvider.word_list for word in words)
+
+
+class TestDeDe:
+ """Test ```de_DE``` lorem provider"""
+
+ word_list = [word.lower() for word in DeDeLoremProvider.word_list]
+
+ def test_paragraph(self, faker, num_samples):
+ num_sentences = 10
+ for _ in range(num_samples):
+ paragraph = faker.paragraph(nb_sentences=num_sentences)
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_paragraphs(self, faker, num_samples):
+ num_paragraphs = 5
+ for _ in range(num_samples):
+ paragraphs = faker.paragraphs(nb=num_paragraphs)
+ for paragraph in paragraphs:
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentence(self, faker, num_samples):
+ num_words = 10
+ for _ in range(num_samples):
+ sentence = faker.sentence(nb_words=num_words)
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentences(self, faker, num_samples):
+ num_sentences = 5
+ for _ in range(num_samples):
+ sentences = faker.sentences(nb=num_sentences)
+ for sentence in sentences:
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_text(self, faker, num_samples):
+ num_chars = 25
+ for _ in range(num_samples):
+ text = faker.text(max_nb_chars=num_chars)
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_texts(self, faker, num_samples):
+ num_texts = 5
+ num_chars = 25
+ for _ in range(num_samples):
+ texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
+ for text in texts:
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_word(self, faker, num_samples):
+ for _ in range(num_samples):
+ word = faker.word()
+ assert isinstance(word, str) and word in DeDeLoremProvider.word_list
+
+ def test_words(self, faker, num_samples):
+ num_words = 5
+ for _ in range(num_samples):
+ words = faker.words(num_words)
+ assert all(isinstance(word, str) and word in DeDeLoremProvider.word_list for word in words)
+
+
+class TestDeAt:
+ """Test ```de_AT``` lorem provider"""
+
+ word_list = [word.lower() for word in DeAtLoremProvider.word_list]
+
+ def test_paragraph(self, faker, num_samples):
+ num_sentences = 10
+ for _ in range(num_samples):
+ paragraph = faker.paragraph(nb_sentences=num_sentences)
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_paragraphs(self, faker, num_samples):
+ num_paragraphs = 5
+ for _ in range(num_samples):
+ paragraphs = faker.paragraphs(nb=num_paragraphs)
+ for paragraph in paragraphs:
+ assert isinstance(paragraph, str)
+ words = paragraph.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentence(self, faker, num_samples):
+ num_words = 10
+ for _ in range(num_samples):
+ sentence = faker.sentence(nb_words=num_words)
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_sentences(self, faker, num_samples):
+ num_sentences = 5
+ for _ in range(num_samples):
+ sentences = faker.sentences(nb=num_sentences)
+ for sentence in sentences:
+ assert isinstance(sentence, str)
+ words = sentence.replace(".", "").split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_text(self, faker, num_samples):
+ num_chars = 25
+ for _ in range(num_samples):
+ text = faker.text(max_nb_chars=num_chars)
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_texts(self, faker, num_samples):
+ num_texts = 5
+ num_chars = 25
+ for _ in range(num_samples):
+ texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
+ for text in texts:
+ assert isinstance(text, str)
+ words = re.sub(r"[.\n]+", " ", text).split()
+ assert all(word.lower() in self.word_list for word in words)
+
+ def test_word(self, faker, num_samples):
+ for _ in range(num_samples):
+ word = faker.word()
+ assert isinstance(word, str) and word in DeAtLoremProvider.word_list
+
+ def test_words(self, faker, num_samples):
+ num_words = 5
+ for _ in range(num_samples):
+ words = faker.words(num_words)
+ assert all(isinstance(word, str) and word in DeAtLoremProvider.word_list for word in words)
| [
{
"components": [
{
"doc": "Implement lorem provider for ``de_DE`` locale.\nUsing the same as in ```de_DE```.",
"lines": [
4,
9
],
"name": "Provider",
"signature": "class Provider(GermanProvider):",
"type": "class"
}
],
"fil... | [
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_defaults",
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_custom_list",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_zero_nb",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_defaults"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Closes #936 Adding lorem support for german
This adds lorem support for the german language. As described in #936.
It uses 500 words of this site: https://www.gut1.de/grundwortschatz/grundwortschatz-500/
Some words are changed, because of grammatical duplications.
The language is also changed for `de_AT`, because it is basiclly the same.
A test for `de_DE` is also added.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/lorem/de_AT/__init__.py]
(definition of Provider:)
class Provider(GermanProvider):
"""Implement lorem provider for ``de_DE`` locale.
Using the same as in ```de_DE```."""
[end of new definitions in faker/providers/lorem/de_AT/__init__.py]
[start of new definitions in faker/providers/lorem/de_DE/__init__.py]
(definition of Provider:)
class Provider(LoremProvider):
"""Implement lorem provider for ``de_DE`` locale.
Word list is based on the source below, and some words have been removed
because of some duplications.
Sources:
- https://www.gut1.de/grundwortschatz/grundwortschatz-500/"""
[end of new definitions in faker/providers/lorem/de_DE/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
wrong language in faker.word() method
### Steps to reproduce
1. from faker import Faker
1. fake = Faker('de_DE')
1. fake.word()
### Expected behavior
Generated word should be in German language
### Actual behavior
Generated word is in Latin. The same situation is with Spanish word() method.
----------
The issue is that the there is no German (or Spanish) localization for the `word` method.
We usually fall back to `en_US` when a localization is missing (https://github.com/joke2k/faker#localization).
The `lorem` provider is a special case in that it falls back to "latin-ish". This is because the original point of the provider was to provide a 'lorem ipsum' kind of text, which one would expect to be in "latin-ish".
I see how this could be confusing from the user perspective. It should at very last fall back to `en_US` as stated in the docs. But switching the fallback locale for `lorem` would be a breaking change.
--------------------
</issues> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | |
Textualize__textual-269 | 269 | Textualize/textual | null | 12bfe8c34acd9977495d2d36612af1241a6797b5 | 2022-02-07T11:37:25Z | diff --git a/src/textual/renderables/sparkline.py b/src/textual/renderables/sparkline.py
new file mode 100644
index 0000000000..22bc959f75
--- /dev/null
+++ b/src/textual/renderables/sparkline.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import statistics
+from typing import Sequence, Iterable, Callable, TypeVar
+
+from rich.color import Color
+from rich.console import ConsoleOptions, Console, RenderResult
+from rich.segment import Segment
+from rich.style import Style
+
+T = TypeVar("T", int, float)
+
+
+class Sparkline:
+ """A sparkline representing a series of data.
+
+ Args:
+ data (Sequence[T]): The sequence of data to render.
+ width (int, optional): The width of the sparkline/the number of buckets to partition the data into.
+ min_color (Color, optional): The color of values equal to the min value in data.
+ max_color (Color, optional): The color of values equal to the max value in data.
+ summary_function (Callable[list[T]]): Function that will be applied to each bucket.
+ """
+
+ BARS = "▁▂▃▄▅▆▇█"
+
+ def __init__(
+ self,
+ data: Sequence[T],
+ *,
+ width: int | None,
+ min_color: Color = Color.from_rgb(0, 255, 0),
+ max_color: Color = Color.from_rgb(255, 0, 0),
+ summary_function: Callable[[list[T]], float] = max,
+ ) -> None:
+ self.data = data
+ self.width = width
+ self.min_color = Style.from_color(min_color)
+ self.max_color = Style.from_color(max_color)
+ self.summary_function = summary_function
+
+ @classmethod
+ def _buckets(cls, data: Sequence[T], num_buckets: int) -> Iterable[list[T]]:
+ """Partition ``data`` into ``num_buckets`` buckets. For example, the data
+ [1, 2, 3, 4] partitioned into 2 buckets is [[1, 2], [3, 4]].
+
+ Args:
+ data (Sequence[T]): The data to partition.
+ num_buckets (int): The number of buckets to partition the data into.
+ """
+ num_steps, remainder = divmod(len(data), num_buckets)
+ for i in range(num_buckets):
+ start = i * num_steps + min(i, remainder)
+ end = (i + 1) * num_steps + min(i + 1, remainder)
+ partition = data[start:end]
+ if partition:
+ yield partition
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ width = self.width or options.max_width
+ len_data = len(self.data)
+ if len_data == 0:
+ yield Segment("▁" * width, style=self.min_color)
+ return
+ if len_data == 1:
+ yield Segment("█" * width, style=self.max_color)
+ return
+
+ minimum, maximum = min(self.data), max(self.data)
+ extent = maximum - minimum or 1
+
+ buckets = list(self._buckets(self.data, num_buckets=self.width))
+
+ bucket_index = 0
+ bars_rendered = 0
+ step = len(buckets) / width
+ summary_function = self.summary_function
+ min_color, max_color = self.min_color.color, self.max_color.color
+ while bars_rendered < width:
+ partition = buckets[int(bucket_index)]
+ partition_summary = summary_function(partition)
+ height_ratio = (partition_summary - minimum) / extent
+ bar_index = int(height_ratio * (len(self.BARS) - 1))
+ bar_color = _blend_colors(min_color, max_color, height_ratio)
+ bars_rendered += 1
+ bucket_index += step
+ yield Segment(text=self.BARS[bar_index], style=Style.from_color(bar_color))
+
+
+def _blend_colors(color1: Color, color2: Color, ratio: float) -> Color:
+ """Given two RGB colors, return a color that sits some distance between
+ them in RGB color space.
+
+ Args:
+ color1 (Color): The first color.
+ color2 (Color): The second color.
+ ratio (float): The ratio of color1 to color2.
+
+ Returns:
+ Color: A Color representing the blending of the two supplied colors.
+ """
+ r1, g1, b1 = color1.triplet
+ r2, g2, b2 = color2.triplet
+ dr = r2 - r1
+ dg = g2 - g1
+ db = b2 - b1
+ return Color.from_rgb(
+ red=r1 + dr * ratio, green=g1 + dg * ratio, blue=b1 + db * ratio
+ )
+
+
+if __name__ == "__main__":
+ console = Console()
+
+ def last(l):
+ return l[-1]
+
+ funcs = min, max, last, statistics.median, statistics.mean
+ nums = [10, 2, 30, 60, 45, 20, 7, 8, 9, 10, 50, 13, 10, 6, 5, 4, 3, 7, 20]
+ console.print(f"data = {nums}\n")
+ for f in funcs:
+ console.print(
+ f"{f.__name__}:\t", Sparkline(nums, width=12, summary_function=f), end=""
+ )
+ console.print("\n")
| diff --git a/tests/renderables/test_sparkline.py b/tests/renderables/test_sparkline.py
new file mode 100644
index 0000000000..74f1f2f6bc
--- /dev/null
+++ b/tests/renderables/test_sparkline.py
@@ -0,0 +1,41 @@
+from tests.utilities.render import render
+from textual.renderables.sparkline import Sparkline
+
+GREEN = "\x1b[38;2;0;255;0m"
+RED = "\x1b[38;2;255;0;0m"
+BLENDED = "\x1b[38;2;127;127;0m" # Color between red and green
+STOP = "\x1b[0m"
+
+
+def test_sparkline_no_data():
+ assert render(Sparkline([], width=4)) == f"{GREEN}▁▁▁▁{STOP}"
+
+
+def test_sparkline_single_datapoint():
+ assert render(Sparkline([2.5], width=4)) == f"{RED}████{STOP}"
+
+
+def test_sparkline_two_values_min_max():
+ assert render(Sparkline([2, 4], width=2)) == f"{GREEN}▁{STOP}{RED}█{STOP}"
+
+
+def test_sparkline_expand_data_to_width():
+ assert render(Sparkline([2, 4],
+ width=4)) == f"{GREEN}▁{STOP}{GREEN}▁{STOP}{RED}█{STOP}{RED}█{STOP}"
+
+
+def test_sparkline_expand_data_to_width_non_divisible():
+ assert render(Sparkline([2, 4], width=3)) == f"{GREEN}▁{STOP}{GREEN}▁{STOP}{RED}█{STOP}"
+
+
+def test_sparkline_shrink_data_to_width():
+ assert render(Sparkline([2, 2, 4, 4, 6, 6], width=3)) == f"{GREEN}▁{STOP}{BLENDED}▄{STOP}{RED}█{STOP}"
+
+
+def test_sparkline_shrink_data_to_width_non_divisible():
+ assert render(
+ Sparkline([1, 2, 3, 4, 5], width=3, summary_function=min)) == f"{GREEN}▁{STOP}{BLENDED}▄{STOP}{RED}█{STOP}"
+
+
+def test_sparkline_color_blend():
+ assert render(Sparkline([1, 2, 3], width=3)) == f"{GREEN}▁{STOP}{BLENDED}▄{STOP}{RED}█{STOP}"
| [
{
"components": [
{
"doc": "A sparkline representing a series of data.\n\nArgs:\n data (Sequence[T]): The sequence of data to render.\n width (int, optional): The width of the sparkline/the number of buckets to partition the data into.\n min_color (Color, optional): The color of values eq... | [
"tests/renderables/test_sparkline.py::test_sparkline_no_data",
"tests/renderables/test_sparkline.py::test_sparkline_single_datapoint",
"tests/renderables/test_sparkline.py::test_sparkline_two_values_min_max",
"tests/renderables/test_sparkline.py::test_sparkline_expand_data_to_width",
"tests/renderables/test... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Sparklines
Closes #259

----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/textual/renderables/sparkline.py]
(definition of Sparkline:)
class Sparkline:
"""A sparkline representing a series of data.
Args:
data (Sequence[T]): The sequence of data to render.
width (int, optional): The width of the sparkline/the number of buckets to partition the data into.
min_color (Color, optional): The color of values equal to the min value in data.
max_color (Color, optional): The color of values equal to the max value in data.
summary_function (Callable[list[T]]): Function that will be applied to each bucket."""
(definition of Sparkline.__init__:)
def __init__( self, data: Sequence[T], *, width: int | None, min_color: Color = Color.from_rgb(0, 255, 0), max_color: Color = Color.from_rgb(255, 0, 0), summary_function: Callable[[list[T]], float] = max, ) -> None:
(definition of Sparkline._buckets:)
def _buckets(cls, data: Sequence[T], num_buckets: int) -> Iterable[list[T]]:
"""Partition ``data`` into ``num_buckets`` buckets. For example, the data
[1, 2, 3, 4] partitioned into 2 buckets is [[1, 2], [3, 4]].
Args:
data (Sequence[T]): The data to partition.
num_buckets (int): The number of buckets to partition the data into."""
(definition of Sparkline.__rich_console__:)
def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult:
(definition of _blend_colors:)
def _blend_colors(color1: Color, color2: Color, ratio: float) -> Color:
"""Given two RGB colors, return a color that sits some distance between
them in RGB color space.
Args:
color1 (Color): The first color.
color2 (Color): The second color.
ratio (float): The ratio of color1 to color2.
Returns:
Color: A Color representing the blending of the two supplied colors."""
(definition of last:)
def last(l):
[end of new definitions in src/textual/renderables/sparkline.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Sparkline renderable
Implement a renderable that generates a sparkline.
The input should be a list of floats of any size. The Sparkline renderable should use the same block characters as used in the scrollbar to render a minature graph.
```python
class Sparkline:
def __init__(self, data: list[float], width:int | None):
...
```
Should look something like the following:
https://blog.jonudell.net/2021/08/05/the-tao-of-unicode-sparklines/
I think we should also color each bar by blending two colors.
Note that the width may not match the data length, so you may need to pick the closes data point.
----------
--------------------
</issues> | 86e93536b991014e0ea4bf993068202b446bb698 | |
conan-io__conan-10530 | 10,530 | conan-io/conan | null | ebed0370c439a8274c2992ee41e424438eebd455 | 2022-02-07T09:04:18Z | diff --git a/conan/tools/files/__init__.py b/conan/tools/files/__init__.py
index 7a84ecda977..03e30e82c30 100644
--- a/conan/tools/files/__init__.py
+++ b/conan/tools/files/__init__.py
@@ -4,3 +4,4 @@
from conan.tools.files.cpp_package import CppPackage
from conan.tools.files.packager import AutoPackager
from conan.tools.files.symlinks import symlinks
+from conan.tools.files.copy_pattern import copy
diff --git a/conan/tools/files/copy_pattern.py b/conan/tools/files/copy_pattern.py
new file mode 100644
index 00000000000..d7062709aa1
--- /dev/null
+++ b/conan/tools/files/copy_pattern.py
@@ -0,0 +1,226 @@
+import fnmatch
+import os
+import shutil
+from collections import defaultdict
+
+from conans.util.files import mkdir
+
+
+def copy(conanfile, pattern, src, dst, keep_path=True, excludes=None,
+ ignore_case=True, copy_symlink_folders=True):
+ # FIXME: Simplify this removing the FileCopier class and moving to simple methods
+ file_copier = _FileCopier([src], dst)
+ copied = file_copier(pattern, keep_path=keep_path, excludes=excludes,
+ ignore_case=ignore_case, copy_symlink_folders=copy_symlink_folders)
+ # FIXME: Not always passed conanfile
+ if conanfile:
+ report_copied_files(copied, conanfile.output)
+ return copied
+
+
+# FIXME: Transform to functions, without several origins
+class _FileCopier(object):
+ """ main responsible of copying files from place to place:
+ package: build folder -> package folder
+ export: user folder -> store "export" folder
+ """
+ def __init__(self, source_folders, root_destination_folder):
+ """
+ Takes the base folders to copy resources src -> dst. These folders names
+ will not be used in the relative names while copying
+ param source_folders: list of folders to copy things from, typically the
+ store build folder
+ param root_destination_folder: The base folder to copy things to, typically the
+ store package folder
+ """
+ assert isinstance(source_folders, list), "source folders must be a list"
+ self._src_folders = source_folders
+ self._dst_folder = root_destination_folder
+ self._copied = []
+
+ def report(self, scoped_output):
+ return report_copied_files(self._copied, scoped_output)
+
+ def __call__(self, pattern, dst="", src="", keep_path=True, excludes=None, ignore_case=True,
+ copy_symlink_folders=True):
+ """
+ It will copy the files matching the pattern from the src folder to the dst, including the
+ symlinks to files. If a folder from "src" doesn't contain any file to be copied, it won't be
+ created empty at the "dst".
+ If in "src" there are symlinks to folders, they will be created at "dst" irrespective if
+ they (or the folder where points) have files to be copied or not, unless
+ "copy_symlink_folders=False" is specified.
+
+ param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll
+ param dst: the destination local folder, wrt to current conanfile dir, to which
+ the files will be copied. Eg: "bin"
+ param src: the source folder in which those files will be searched. This folder
+ will be stripped from the dst name. Eg.: lib/Debug/x86
+ param keep_path: False if you want the relative paths to be maintained from
+ src to dst folders, or just drop. False is useful if you want
+ to collect e.g. many *.libs among many dirs into a single
+ lib dir
+ param excludes: Single pattern or a tuple of patterns to be excluded from the copy
+ param ignore_case: will do a case-insensitive pattern matching when True
+ param copy_symlink_folders: Copy the symlink folders at the "dst" folder.
+
+ return: list of copied files
+ """
+
+ if os.path.isabs(src):
+ # Avoid repeatedly copying absolute paths
+ return self._copy(os.curdir, pattern, src, dst, ignore_case, excludes, keep_path,
+ excluded_folders=[self._dst_folder],
+ copy_symlink_folders=copy_symlink_folders)
+
+ files = []
+ for src_folder in self._src_folders:
+ excluded = [self._dst_folder]
+ excluded.extend([d for d in self._src_folders if d is not src_folder])
+ fs = self._copy(src_folder, pattern, src, dst, ignore_case, excludes, keep_path,
+ excluded_folders=excluded, copy_symlink_folders=copy_symlink_folders)
+ files.extend(fs)
+
+ return files
+
+ def _copy(self, base_src, pattern, src, dst, ignore_case, excludes, keep_path,
+ excluded_folders, copy_symlink_folders):
+ # Check for ../ patterns and allow them
+ if pattern.startswith(".."):
+ rel_dir = os.path.abspath(os.path.join(base_src, pattern))
+ base_src = os.path.dirname(rel_dir)
+ pattern = os.path.basename(rel_dir)
+
+ src = os.path.join(base_src, src)
+ dst = os.path.join(self._dst_folder, dst)
+ if src == dst:
+ return []
+
+ files_to_copy, symlinked_folders = self._filter_files(src, pattern, excludes, ignore_case,
+ excluded_folders)
+
+ copied_files = self._copy_files(files_to_copy, src, dst, keep_path)
+ if copy_symlink_folders:
+ self._create_symlinked_folders(src, dst, symlinked_folders)
+
+ self._copied.extend(files_to_copy)
+ return copied_files
+
+ @staticmethod
+ def _create_symlinked_folders(src, dst, symlinked_folders):
+ """If in the src folder there are folders that are symlinks, create them in the dst folder
+ pointing exactly to the same place."""
+ for folder in symlinked_folders:
+ relative_path = os.path.relpath(folder, src)
+ symlink_path = os.path.join(dst, relative_path)
+ # We create the same symlink in dst, no matter if it is absolute or relative
+ link_dst = os.readlink(folder) # This could be perfectly broken
+
+ # Create the parent directory that will contain the symlink file
+ mkdir(os.path.dirname(symlink_path))
+ # If the symlink is already there, remove it (multiple copy(*.h) copy(*.dll))
+ if os.path.islink(symlink_path):
+ os.unlink(symlink_path)
+ os.symlink(link_dst, symlink_path)
+
+ @staticmethod
+ def _filter_files(src, pattern, excludes, ignore_case, excluded_folders):
+
+ """ return a list of the files matching the patterns
+ The list will be relative path names wrt to the root src folder
+ """
+ filenames = []
+ symlinked_folders = []
+
+ if excludes:
+ if not isinstance(excludes, (tuple, list)):
+ excludes = (excludes, )
+ if ignore_case:
+ excludes = [e.lower() for e in excludes]
+ else:
+ excludes = []
+
+ for root, subfolders, files in os.walk(src, followlinks=True):
+ if root in excluded_folders:
+ subfolders[:] = []
+ continue
+
+ if os.path.islink(root):
+ symlinked_folders.append(root)
+ # This is a symlink folder, the symlink will be copied, so stop iterating this folder
+ subfolders[:] = []
+ continue
+
+ relative_path = os.path.relpath(root, src)
+ compare_relative_path = relative_path.lower() if ignore_case else relative_path
+ for exclude in excludes:
+ if fnmatch.fnmatch(compare_relative_path, exclude):
+ subfolders[:] = []
+ files = []
+ break
+ for f in files:
+ relative_name = os.path.normpath(os.path.join(relative_path, f))
+ filenames.append(relative_name)
+
+ if ignore_case:
+ pattern = pattern.lower()
+ files_to_copy = [n for n in filenames if fnmatch.fnmatch(os.path.normpath(n.lower()),
+ pattern)]
+ else:
+ files_to_copy = [n for n in filenames if fnmatch.fnmatchcase(os.path.normpath(n),
+ pattern)]
+
+ for exclude in excludes:
+ if ignore_case:
+ files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f.lower(), exclude)]
+ else:
+ files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatchcase(f, exclude)]
+
+ return files_to_copy, symlinked_folders
+
+ @staticmethod
+ def _copy_files(files, src, dst, keep_path):
+ """ executes a multiple file copy from [(src_file, dst_file), (..)]
+ managing symlinks if necessary
+ """
+ copied_files = []
+ for filename in files:
+ abs_src_name = os.path.join(src, filename)
+ filename = filename if keep_path else os.path.basename(filename)
+ abs_dst_name = os.path.normpath(os.path.join(dst, filename))
+ try:
+ os.makedirs(os.path.dirname(abs_dst_name))
+ except Exception:
+ pass
+ if os.path.islink(abs_src_name):
+ linkto = os.readlink(abs_src_name) # @UndefinedVariable
+ try:
+ os.remove(abs_dst_name)
+ except OSError:
+ pass
+ os.symlink(linkto, abs_dst_name) # @UndefinedVariable
+ else:
+ shutil.copy2(abs_src_name, abs_dst_name)
+ copied_files.append(abs_dst_name)
+ return copied_files
+
+
+# FIXME: This doesn't belong here
+def report_copied_files(copied, scoped_output, message_suffix="Copied"):
+ ext_files = defaultdict(list)
+ for f in copied:
+ _, ext = os.path.splitext(f)
+ ext_files[ext].append(os.path.basename(f))
+
+ if not ext_files:
+ return False
+
+ for ext, files in ext_files.items():
+ files_str = (": " + ", ".join(files)) if len(files) < 5 else ""
+ file_or_files = "file" if len(files) == 1 else "files"
+ if not ext:
+ scoped_output.info("%s %d %s%s" % (message_suffix, len(files), file_or_files, files_str))
+ else:
+ scoped_output.info("%s %d '%s' %s%s"
+ % (message_suffix, len(files), ext, file_or_files, files_str))
+ return True
| diff --git a/conans/test/unittests/tools/files/test_tool_copy.py b/conans/test/unittests/tools/files/test_tool_copy.py
new file mode 100644
index 00000000000..84f5fc618bf
--- /dev/null
+++ b/conans/test/unittests/tools/files/test_tool_copy.py
@@ -0,0 +1,263 @@
+import mock
+import os
+import platform
+import unittest
+
+import pytest
+
+from conan.tools.files import copy
+from conans.test.utils.test_files import temp_folder
+from conans.util.files import load, save
+
+
+class ToolCopyTest(unittest.TestCase):
+
+ def test_basic(self):
+ folder1 = temp_folder()
+ sub1 = os.path.join(folder1, "subdir1")
+ sub2 = os.path.join(folder1, "subdir2")
+ save(os.path.join(sub1, "file1.txt"), "hello1")
+ save(os.path.join(sub1, "file2.c"), "Hello2")
+ save(os.path.join(sub1, "sub1/file1.txt"), "Hello1 sub")
+ save(os.path.join(sub1, "sub1/file2.c"), "Hello2 sub")
+ save(os.path.join(sub2, "file1.txt"), "2 Hello1")
+ save(os.path.join(sub2, "file2.c"), "2 Hello2")
+
+ folder2 = temp_folder()
+ copy(None, "*.txt", folder1, os.path.join(folder2, "texts"))
+ self.assertEqual("hello1", load(os.path.join(folder2, "texts/subdir1/file1.txt")))
+ self.assertEqual("Hello1 sub", load(os.path.join(folder2, "texts/subdir1/sub1/file1.txt")))
+ self.assertEqual("2 Hello1", load(os.path.join(folder2, "texts/subdir2/file1.txt")))
+ self.assertEqual(['file1.txt'], os.listdir(os.path.join(folder2, "texts/subdir2")))
+
+ folder2 = temp_folder()
+ copy(None, "*.txt", os.path.join(folder1, "subdir1"), os.path.join(folder2, "texts"))
+ self.assertEqual("hello1", load(os.path.join(folder2, "texts/file1.txt")))
+ self.assertEqual("Hello1 sub", load(os.path.join(folder2, "texts/sub1/file1.txt")))
+ self.assertNotIn("subdir2", os.listdir(os.path.join(folder2, "texts")))
+
+ @pytest.mark.skipif(platform.system() == "Windows", reason="Requires Symlinks")
+ def test_basic_with_linked_dir(self):
+ folder1 = temp_folder()
+ sub1 = os.path.join(folder1, "subdir1")
+ sub2 = os.path.join(folder1, "subdir2")
+ os.makedirs(sub1)
+ os.symlink("subdir1", sub2)
+ save(os.path.join(sub1, "file1.txt"), "hello1")
+ save(os.path.join(sub1, "file2.c"), "Hello2")
+ save(os.path.join(sub1, "sub1/file1.txt"), "Hello1 sub")
+ folder2 = temp_folder()
+ copy(None, "*.txt", folder1, os.path.join(folder2, "texts"))
+ self.assertEqual(os.readlink(os.path.join(folder2, "texts/subdir2")), "subdir1")
+ self.assertEqual("hello1", load(os.path.join(folder2, "texts/subdir1/file1.txt")))
+ self.assertEqual("Hello1 sub", load(os.path.join(folder2,
+ "texts/subdir1/sub1/file1.txt")))
+ self.assertEqual("hello1", load(os.path.join(folder2, "texts/subdir2/file1.txt")))
+ self.assertEqual(['file1.txt', 'sub1'].sort(),
+ os.listdir(os.path.join(folder2, "texts/subdir2")).sort())
+
+ folder2 = temp_folder()
+ copy(None, "*.txt", os.path.join(folder1, "subdir1"), os.path.join(folder2, "texts"))
+ self.assertEqual("hello1", load(os.path.join(folder2, "texts/file1.txt")))
+ self.assertEqual("Hello1 sub", load(os.path.join(folder2, "texts/sub1/file1.txt")))
+ self.assertNotIn("subdir2", os.listdir(os.path.join(folder2, "texts")))
+
+ @pytest.mark.skipif(platform.system() == "Windows", reason="Requires Symlinks")
+ def test_linked_folder_missing_error(self):
+ folder1 = temp_folder()
+ sub1 = os.path.join(folder1, "subdir1")
+ sub2 = os.path.join(folder1, "subdir2")
+ os.makedirs(sub1)
+ os.symlink("subdir1", sub2) # @UndefinedVariable
+ save(os.path.join(sub1, "file1.txt"), "hello1")
+ save(os.path.join(sub1, "file2.c"), "Hello2")
+ save(os.path.join(sub1, "sub1/file1.txt"), "Hello1 sub")
+
+ folder2 = temp_folder()
+ copy(None, "*.cpp", folder1, folder2)
+ # If we don't specify anything, the "subdir2" (symlinked folder) will be there even if it
+ # points to an empty folder
+ self.assertEqual(os.listdir(folder2), ["subdir2"])
+ sub2_abs = os.path.join(folder2, "subdir2")
+ assert os.path.islink(sub2_abs)
+ assert os.readlink(sub2_abs) == "subdir1"
+
+ # If we specify anything, the "subdir2" (symlinked folder) will be there even if it
+ # points to an empty folder
+ os.remove(sub2_abs)
+ copy(None, "*.cpp", folder1, folder2, copy_symlink_folders=False)
+ self.assertEqual(os.listdir(folder2), [])
+
+ copy(None, "*.txt", folder1, folder2)
+ self.assertEqual(sorted(os.listdir(folder2)), sorted(["subdir1", "subdir2"]))
+ self.assertEqual(os.readlink(os.path.join(folder2, "subdir2")), "subdir1")
+ self.assertEqual("hello1", load(os.path.join(folder2, "subdir1/file1.txt")))
+ self.assertEqual("hello1", load(os.path.join(folder2, "subdir2/file1.txt")))
+
+ @pytest.mark.skipif(platform.system() == "Windows", reason="Requires Symlinks")
+ def test_linked_relative(self):
+ folder1 = temp_folder()
+ sub1 = os.path.join(folder1, "foo/other/file")
+ save(os.path.join(sub1, "file.txt"), "Hello")
+ sub2 = os.path.join(folder1, "foo/symlink")
+ os.symlink("other/file", sub2) # @UndefinedVariable
+
+ folder2 = temp_folder()
+ copy(None, "*", folder1, folder2)
+ symlink = os.path.join(folder2, "foo", "symlink")
+ self.assertTrue(os.path.islink(symlink))
+ self.assertTrue(load(os.path.join(symlink, "file.txt")), "Hello")
+
+ @pytest.mark.skipif(platform.system() == "Windows", reason="Requires Symlinks")
+ def test_linked_folder_nested(self):
+ # https://github.com/conan-io/conan/issues/2959
+ folder1 = temp_folder()
+ sub1 = os.path.join(folder1, "lib/icu/60.2")
+ sub2 = os.path.join(folder1, "lib/icu/current")
+ os.makedirs(sub1)
+ os.symlink("60.2", sub2) # @UndefinedVariable
+
+ folder2 = temp_folder()
+ copied = copy(None, "*.cpp", folder1, folder2)
+ self.assertEqual(copied, [])
+
+ @pytest.mark.skipif(platform.system() == "Windows", reason="Requires Symlinks")
+ def test_linked_folder_copy_from_linked_folder(self):
+ # https://github.com/conan-io/conan/issues/5114
+ folder1 = temp_folder(path_with_spaces=False)
+ sub_src = os.path.join(folder1, "sub/src")
+
+ src = os.path.join(folder1, "src")
+ src_dir = os.path.join(folder1, "src/dir")
+ src_dir_link = os.path.join(folder1, "src/dir_link")
+ src_dir_file = os.path.join(src_dir, "file.txt")
+
+ dst = os.path.join(folder1, "dst")
+ dst_dir = os.path.join(folder1, "dst/dir")
+ dst_dir_link = os.path.join(folder1, "dst/dir_link")
+ dst_dir_file = os.path.join(dst_dir, "file.txt")
+
+ os.makedirs(dst)
+ os.makedirs(sub_src)
+ # input src folder should be a symlink
+ os.symlink(sub_src, src)
+ # folder, file and folder link to copy
+ os.mkdir(src_dir)
+ save(src_dir_file, "file")
+ os.symlink(src_dir, src_dir_link)
+
+ copied = copy(None, "*", src, dst)
+
+ self.assertEqual(copied, [dst_dir_file])
+ self.assertEqual(os.listdir(dst), os.listdir(src))
+ self.assertTrue(os.path.islink(dst_dir_link))
+
+ def test_excludes(self):
+ folder1 = temp_folder()
+ sub1 = os.path.join(folder1, "subdir1")
+ save(os.path.join(sub1, "file1.txt"), "hello1")
+ save(os.path.join(sub1, "file2.c"), "Hello2")
+
+ folder2 = temp_folder()
+ copy(None, "*.*", folder1, os.path.join(folder2, "texts"), excludes="*.c")
+ self.assertEqual(['file1.txt'], os.listdir(os.path.join(folder2, "texts/subdir1")))
+
+ folder1 = temp_folder()
+ save(os.path.join(folder1, "MyLib.txt"), "")
+ save(os.path.join(folder1, "MyLibImpl.txt"), "")
+ save(os.path.join(folder1, "MyLibTests.txt"), "")
+ folder2 = temp_folder()
+ copy(None, "*.txt", folder1, folder2, excludes="*Test*.txt")
+ self.assertEqual({'MyLib.txt', 'MyLibImpl.txt'}, set(os.listdir(folder2)))
+
+ folder2 = temp_folder()
+ copy(None, "*.txt", folder1, folder2, excludes=("*Test*.txt", "*Impl*"))
+ self.assertEqual(['MyLib.txt'], os.listdir(folder2))
+
+ def test_excludes_camelcase_folder(self):
+ # https://github.com/conan-io/conan/issues/8153
+ folder1 = temp_folder()
+ save(os.path.join(folder1, "UPPER.txt"), "")
+ save(os.path.join(folder1, "lower.txt"), "")
+ sub2 = os.path.join(folder1, "CamelCaseIgnore")
+ save(os.path.join(sub2, "file3.txt"), "")
+
+ folder2 = temp_folder()
+ copy(None, "*", folder1, folder2, excludes=["CamelCaseIgnore", "UPPER.txt"])
+ self.assertFalse(os.path.exists(os.path.join(folder2, "CamelCaseIgnore")))
+ self.assertFalse(os.path.exists(os.path.join(folder2, "UPPER.txt")))
+ self.assertTrue(os.path.exists(os.path.join(folder2, "lower.txt")))
+
+ folder2 = temp_folder()
+ copy(None, "*", folder1, folder2)
+ self.assertTrue(os.path.exists(os.path.join(folder2, "CamelCaseIgnore")))
+ self.assertTrue(os.path.exists(os.path.join(folder2, "UPPER.txt")))
+ self.assertTrue(os.path.exists(os.path.join(folder2, "lower.txt")))
+
+ def test_multifolder(self):
+ src_folder1 = temp_folder()
+ src_folder2 = temp_folder()
+ save(os.path.join(src_folder1, "file1.txt"), "hello1")
+ save(os.path.join(src_folder2, "file2.txt"), "Hello2")
+
+ dst_folder = temp_folder()
+ copy(None, "*", src_folder1, dst_folder)
+ copy(None, "*", src_folder2, dst_folder)
+ self.assertEqual(['file1.txt', 'file2.txt'],
+ sorted(os.listdir(dst_folder)))
+
+ @mock.patch('shutil.copy2')
+ def test_avoid_repeat_copies(self, copy2_mock):
+ src_folders = [temp_folder() for _ in range(2)]
+ for index, src_folder in enumerate(src_folders):
+ save(os.path.join(src_folder, "sub", "file%d.txt" % index),
+ "Hello%d" % index)
+
+ dst_folder = temp_folder()
+
+ for src_folder in src_folders:
+ copy(None, "*", os.path.join(src_folder, "sub"), dst_folder)
+
+ self.assertEqual(copy2_mock.call_count, len(src_folders))
+
+ def test_ignore_case(self):
+ src_folder = temp_folder()
+ save(os.path.join(src_folder, "FooBar.txt"), "Hello")
+
+ dst_folder = temp_folder()
+ copy(None, "foobar.txt", src_folder, dst_folder, ignore_case=False)
+ self.assertEqual([], os.listdir(dst_folder))
+
+ dst_folder = temp_folder()
+ copy(None, "FooBar.txt", src_folder, dst_folder, ignore_case=False)
+ self.assertEqual(["FooBar.txt"], os.listdir(dst_folder))
+
+ dst_folder = temp_folder()
+ copy(None, "foobar.txt", src_folder, dst_folder, ignore_case=True)
+ self.assertEqual(["FooBar.txt"], os.listdir(dst_folder))
+
+ def test_ignore_case_excludes(self):
+ src_folder = temp_folder()
+ save(os.path.join(src_folder, "file.h"), "")
+ save(os.path.join(src_folder, "AttributeStorage.h"), "")
+ save(os.path.join(src_folder, "sub/file.h"), "")
+ save(os.path.join(src_folder, "sub/AttributeStorage.h"), "")
+
+ dst_folder = temp_folder()
+ # Exclude pattern will match AttributeStorage
+ copy(None, "*.h", src_folder, os.path.join(dst_folder, "include"),
+ excludes="*Test*")
+ self.assertEqual(["include"], os.listdir(dst_folder))
+ self.assertEqual(sorted(["file.h", "sub"]),
+ sorted(os.listdir(os.path.join(dst_folder, "include"))))
+ self.assertEqual(["file.h"], os.listdir(os.path.join(dst_folder, "include", "sub")))
+
+ dst_folder = temp_folder()
+ # Exclude pattern will not match AttributeStorage if ignore_case=False
+ copy(None, "*.h", src_folder, os.path.join(dst_folder, "include"), excludes="*Test*",
+ ignore_case=False)
+ self.assertEqual(["include"], os.listdir(dst_folder))
+ self.assertEqual(sorted(["AttributeStorage.h", "file.h", "sub"]),
+ sorted(os.listdir(os.path.join(dst_folder, "include"))))
+ self.assertEqual(sorted(["AttributeStorage.h", "file.h"]),
+ sorted(os.listdir(os.path.join(dst_folder, "include", "sub"))))
| [
{
"components": [
{
"doc": "",
"lines": [
9,
18
],
"name": "copy",
"signature": "def copy(conanfile, pattern, src, dst, keep_path=True, excludes=None, ignore_case=True, copy_symlink_folders=True):",
"type": "function"
},
{
... | [
"conans/test/unittests/tools/files/test_tool_copy.py::ToolCopyTest::test_avoid_repeat_copies",
"conans/test/unittests/tools/files/test_tool_copy.py::ToolCopyTest::test_basic",
"conans/test/unittests/tools/files/test_tool_copy.py::ToolCopyTest::test_basic_with_linked_dir",
"conans/test/unittests/tools/files/te... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Ported copy tool from develop2
Changelog: Feature: New `copy` tool at `conan.tools.files` namespace that will replace the `self.copy` in Conan 2.0.
Docs: https://github.com/conan-io/docs/pull/2428
Document the migration plan of self.copy, no imports, no deploy.
Close https://github.com/conan-io/conan/issues/10529
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/files/copy_pattern.py]
(definition of copy:)
def copy(conanfile, pattern, src, dst, keep_path=True, excludes=None, ignore_case=True, copy_symlink_folders=True):
(definition of _FileCopier:)
class _FileCopier(object):
"""main responsible of copying files from place to place:
package: build folder -> package folder
export: user folder -> store "export" folder"""
(definition of _FileCopier.__init__:)
def __init__(self, source_folders, root_destination_folder):
"""Takes the base folders to copy resources src -> dst. These folders names
will not be used in the relative names while copying
param source_folders: list of folders to copy things from, typically the
store build folder
param root_destination_folder: The base folder to copy things to, typically the
store package folder"""
(definition of _FileCopier.report:)
def report(self, scoped_output):
(definition of _FileCopier.__call__:)
def __call__(self, pattern, dst="", src="", keep_path=True, excludes=None, ignore_case=True, copy_symlink_folders=True):
"""It will copy the files matching the pattern from the src folder to the dst, including the
symlinks to files. If a folder from "src" doesn't contain any file to be copied, it won't be
created empty at the "dst".
If in "src" there are symlinks to folders, they will be created at "dst" irrespective if
they (or the folder where points) have files to be copied or not, unless
"copy_symlink_folders=False" is specified.
param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll
param dst: the destination local folder, wrt to current conanfile dir, to which
the files will be copied. Eg: "bin"
param src: the source folder in which those files will be searched. This folder
will be stripped from the dst name. Eg.: lib/Debug/x86
param keep_path: False if you want the relative paths to be maintained from
src to dst folders, or just drop. False is useful if you want
to collect e.g. many *.libs among many dirs into a single
lib dir
param excludes: Single pattern or a tuple of patterns to be excluded from the copy
param ignore_case: will do a case-insensitive pattern matching when True
param copy_symlink_folders: Copy the symlink folders at the "dst" folder.
return: list of copied files"""
(definition of _FileCopier._copy:)
def _copy(self, base_src, pattern, src, dst, ignore_case, excludes, keep_path, excluded_folders, copy_symlink_folders):
(definition of _FileCopier._create_symlinked_folders:)
def _create_symlinked_folders(src, dst, symlinked_folders):
"""If in the src folder there are folders that are symlinks, create them in the dst folder
pointing exactly to the same place."""
(definition of _FileCopier._filter_files:)
def _filter_files(src, pattern, excludes, ignore_case, excluded_folders):
"""return a list of the files matching the patterns
The list will be relative path names wrt to the root src folder"""
(definition of _FileCopier._copy_files:)
def _copy_files(files, src, dst, keep_path):
"""executes a multiple file copy from [(src_file, dst_file), (..)]
managing symlinks if necessary"""
(definition of report_copied_files:)
def report_copied_files(copied, scoped_output, message_suffix="Copied"):
[end of new definitions in conan/tools/files/copy_pattern.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
joke2k__faker-1608 | 1,608 | joke2k/faker | null | 922261db55539967358401193901a486f0967ef4 | 2022-02-05T04:30:09Z | diff --git a/faker/providers/lorem/bn_BD/__init__.py b/faker/providers/lorem/bn_BD/__init__.py
new file mode 100644
index 0000000000..f49df831e1
--- /dev/null
+++ b/faker/providers/lorem/bn_BD/__init__.py
@@ -0,0 +1,233 @@
+from .. import Provider as LoremProvider
+
+
+class Provider(LoremProvider):
+ """Implement lorem provider for ``bn_BD`` locale."""
+
+ # source 1: https://en.wikipedia.org/wiki/Bengali_vocabulary
+ # source 2: https://en.wikipedia.org/wiki/Bengali_grammar
+
+ word_connector = " "
+ sentence_punctuation = "।"
+ word_list = (
+ "পানি",
+ "লবণ",
+ "দাওয়াত",
+ "মরিচ",
+ "খালা",
+ "ফুফু",
+ "গোসল",
+ "বাতাস",
+ "চাহিদা",
+ "স্বাগতম",
+ "যোগ",
+ "আসন",
+ "আশ্রম",
+ "আয়ুর্বেদ",
+ "বন্ধন",
+ "খাট",
+ "ধুতি",
+ "মায়া",
+ "স্বামী",
+ "লক্ষ্মী",
+ "লক্ষ্মণ",
+ "কুড়ি",
+ "খুকি",
+ "খোকা",
+ "খোঁচা",
+ "খোঁজ",
+ "চাল",
+ "চিংড়ি",
+ "চুলা",
+ "ঝিনুক",
+ "ঝোল",
+ "ঠ্যাং",
+ "ঢোল",
+ "পেট",
+ "বোবা",
+ "মাঠ",
+ "মুড়ি",
+ "আবহাওয়া",
+ "চাকরি",
+ "আয়না",
+ "আরাম",
+ "বকশিশ",
+ "আস্তে",
+ "কাগজ",
+ "খারাপ",
+ "খোদা",
+ "খুব",
+ "গরম",
+ "চশমা",
+ "চাকর",
+ "চাদর",
+ "জান",
+ "জায়গা",
+ "ডেগচি",
+ "দম",
+ "দেরি",
+ "দোকান",
+ "পর্দা",
+ "বদ",
+ "বাগান",
+ "রাস্তা",
+ "রোজ",
+ "হিন্দু",
+ "পছন্দ",
+ "টেক্কা",
+ "আলু",
+ "নখ",
+ "খুন",
+ "আওয়াজ",
+ "আসল",
+ "এলাকা",
+ "ওজন",
+ "কলম",
+ "খবর",
+ "খালি",
+ "খেয়াল",
+ "গরিব",
+ "জমা",
+ "তারিখ",
+ "দুনিয়া",
+ "নকল",
+ "ফকির",
+ "বদল",
+ "বাকি",
+ "শয়তান",
+ "সাহেব",
+ "সনদ",
+ "সাল",
+ "সন",
+ "হিসাব",
+ "দাদা",
+ "বাবা",
+ "নানি",
+ "চকমক",
+ "বাবুর্চি",
+ "বেগম",
+ "কেচি",
+ "লাশ",
+ "তবলা",
+ "আলমারি",
+ "ইস্ত্রি",
+ "ইস্তিরি",
+ "ইস্পাত",
+ "কামিজ",
+ "গামলা",
+ "চাবি",
+ "জানালা",
+ "তামাক",
+ "পেরেক",
+ "ফিতা",
+ "বারান্দা",
+ "বালতি",
+ "বেহালা",
+ "বোতাম",
+ "মেজ",
+ "সাবান",
+ "কেদারা",
+ "আতা",
+ "আনারস",
+ "কাজু",
+ "কপি",
+ "পেঁপে",
+ "পেয়ারা",
+ "সালাদ",
+ "গির্জা",
+ "যিশু",
+ "পাদ্রি",
+ "ইংরেজ",
+ "অফিস",
+ "জেল",
+ "ডাক্তার",
+ "পুলিশ",
+ "ব্যাংক",
+ "ভোট",
+ "স্কুল",
+ "হাসপাতাল",
+ "কাপ",
+ "গ্লাস",
+ "চেয়ার",
+ "টেবিল",
+ "বাক্স",
+ "লণ্ঠন",
+ "প্লাস্টিক",
+ "কলেজ",
+ "সাইকেল",
+ "রেস্তোরাঁ",
+ "সুড়ঙ্গ",
+ "চা",
+ "চিনি",
+ "সুনামি",
+ "রিক্সা",
+ "বোকা",
+ "ছোট্ট",
+ "লুঙ্গি",
+ "ডেঙ্গু",
+ "মানুষজন",
+ "মাফিয়া",
+ "স্টুডিও",
+ "ম্যালেরিয়া",
+ "ক্যাঙারু",
+ "বুমেরাং",
+ "আমি",
+ "তুই",
+ "তুমি",
+ "আপনি",
+ "এ",
+ "ইনি",
+ "ও",
+ "উনি",
+ "সে",
+ "তিনি",
+ "সেটি",
+ "আমরা",
+ "তোরা",
+ "তোমরা",
+ "আপনারা",
+ "এরা",
+ "এগুলো",
+ "ওরা",
+ "এঁরা",
+ "ওঁরা",
+ "তারা",
+ "তাঁরা",
+ "সেগুলো",
+ "আমাকে",
+ "তোকে",
+ "আমাদেরকে",
+ "তোদেরকে",
+ "তোমাকে",
+ "তোমাদেরকে",
+ "আপনাকে",
+ "আপনাদেরকে",
+ "একে",
+ "এদেরকে",
+ "এঁকে",
+ "এঁদেরকে",
+ "এটি",
+ "এটা",
+ "ওকে",
+ "ওদেরকে",
+ "ওঁকে",
+ "ওঁদেরকে",
+ "ওটি",
+ "ওটা",
+ "ওগুলো",
+ "তাকে",
+ "তাদেরকে",
+ "তাঁকে",
+ "তাঁদেরকে",
+ "সেটা",
+ "কে",
+ "কার",
+ "কাকে",
+ "কোন",
+ "কি",
+ "কেউ",
+ "কারও",
+ "কাউকে",
+ "কোনও",
+ "কিছু",
+ )
| diff --git a/tests/providers/test_lorem.py b/tests/providers/test_lorem.py
index 0838017e47..22e3d61533 100644
--- a/tests/providers/test_lorem.py
+++ b/tests/providers/test_lorem.py
@@ -3,6 +3,7 @@
import pytest
from faker.providers.lorem.az_AZ import Provider as AzAzLoremProvider
+from faker.providers.lorem.bn_BD import Provider as BnBdLoremProvider
from faker.providers.lorem.cs_CZ import Provider as CsCzLoremProvider
from faker.providers.lorem.de_AT import Provider as DeAtLoremProvider
from faker.providers.lorem.de_DE import Provider as DeDeLoremProvider
@@ -317,6 +318,46 @@ def test_words(self, faker, num_samples):
assert all(isinstance(word, str) and word in AzAzLoremProvider.word_list for word in words)
+class TestBnBd:
+ """Test bn_BD lorem provider"""
+
+ word_list = BnBdLoremProvider.word_list
+
+ def test_paragraph(self, faker, num_samples):
+ num_sentences = 10
+ for _ in range(num_samples):
+ paragraph = faker.paragraph(nb_sentences=num_sentences)
+ assert isinstance(paragraph, str)
+ words = paragraph.replace("।", "").split()
+ assert all(word in self.word_list for word in words)
+
+ def test_paragraphs(self, faker, num_samples):
+ num_paragraphs = 5
+ for _ in range(num_samples):
+ paragraphs = faker.paragraphs(nb=num_paragraphs)
+ for paragraph in paragraphs:
+ assert isinstance(paragraph, str)
+ words = paragraph.replace("।", "").split()
+ assert all(word in self.word_list for word in words)
+
+ def test_sentence(self, faker, num_samples):
+ num_words = 10
+ for _ in range(num_samples):
+ sentence = faker.sentence(nb_words=num_words)
+ assert isinstance(sentence, str)
+ words = sentence.replace("।", "").split()
+ assert all(word in self.word_list for word in words)
+
+ def test_sentences(self, faker, num_samples):
+ num_sentences = 5
+ for _ in range(num_samples):
+ sentences = faker.sentences(nb=num_sentences)
+ for sentence in sentences:
+ assert isinstance(sentence, str)
+ words = sentence.replace("।", "").split()
+ assert all(word in self.word_list for word in words)
+
+
class TestDeDe:
"""Test ```de_DE``` lorem provider"""
| [
{
"components": [
{
"doc": "Implement lorem provider for ``bn_BD`` locale.",
"lines": [
4,
232
],
"name": "Provider",
"signature": "class Provider(LoremProvider):",
"type": "class"
}
],
"file": "faker/providers/lorem/bn_BD/_... | [
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_defaults",
"tests/providers/test_lorem.py::TestLoremProvider::test_word_with_custom_list",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_zero_nb",
"tests/providers/test_lorem.py::TestLoremProvider::test_words_with_defaults"... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Task/jabertuhin/1519/add loremprovider for bn bd
### What does this changes
This pull-request resolves issue #1519 . I have created `bn_BD` package under lorem provider.
### What was wrong
NA
### How this fixes it
NA
### Fixes
NA
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/lorem/bn_BD/__init__.py]
(definition of Provider:)
class Provider(LoremProvider):
"""Implement lorem provider for ``bn_BD`` locale."""
[end of new definitions in faker/providers/lorem/bn_BD/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
sympy__sympy-23014 | 23,014 | sympy/sympy | 1.11 | 69662b4fb4a9c585742da50ff33c5ba896108f23 | 2022-02-04T20:48:58Z | diff --git a/doc/src/explanation/active-deprecations.md b/doc/src/explanation/active-deprecations.md
index fa0328c0fdff..84fda0eb75f9 100644
--- a/doc/src/explanation/active-deprecations.md
+++ b/doc/src/explanation/active-deprecations.md
@@ -74,6 +74,37 @@ will need to either add a `warnings` filter as above or use pytest to filter
SymPy deprecation warnings.
```
+## Version 1.11
+
+(mathematica-parser-additional-translations)=
+### Mathematica parser: removed ``additional_translations`` parameter
+
+The ``additional_translations`` parameter for the Mathematica parser is now deprecated.
+Additional translation rules to convert Mathematica expressions into SymPy ones
+should be specified after the conversion using SymPy's .replace( ) or .subs( )
+methods on the output expression. If the translator fails to recognize the logical
+meaning of a Mathematica expression, a form similar to Mathematica's full form
+will be returned, using SymPy's ``Function`` object to encode the nodes of the
+syntax tree.
+
+For example, suppose you want ``F`` to be a function that returns the maximum
+value multiplied by the minimum value, the previous way to
+specify this conversion was:
+
+```py
+>>> from sympy.parsing.mathematica import mathematica
+>>> mathematica('F[7,5,3]', {'F[*x]':'Max(*x)*Min(*x)'})
+21
+```
+
+Now you can do the same with
+
+```py
+>>> from sympy import Function, Max, Min
+>>> mathematica("F[7,5,3]").replace(Function("F"), lambda *x: Max(*x)*Min(*x))
+21
+```
+
## Version 1.10
(deprecated-traversal-functions-moved)=
diff --git a/sympy/parsing/mathematica.py b/sympy/parsing/mathematica.py
index 9f009958f29a..e206bef79243 100644
--- a/sympy/parsing/mathematica.py
+++ b/sympy/parsing/mathematica.py
@@ -1,28 +1,101 @@
-from typing import Any, Dict as tDict, Tuple as tTuple
-
-from itertools import product
import re
-from sympy.core.sympify import sympify
+import typing
+from itertools import product
+from typing import Any, Dict as tDict, Tuple as tTuple, List, Optional, Union as tUnion, Callable
+
+import sympy
+from sympy import Mul, Add, Pow, log, exp, sqrt, cos, sin, tan, asin, acos, acot, asec, acsc, sinh, cosh, tanh, asinh, \
+ acosh, atanh, acoth, asech, acsch, expand, im, flatten, polylog, cancel, expand_trig, sign, simplify, \
+ UnevaluatedExpr, S, atan, atan2, Mod, Max, Min, rf, Ei, Si, Ci, airyai, airyaiprime, airybi, primepi, prime, \
+ isprime, cot, sec, csc, csch, sech, coth, Function, I, pi, Tuple, GreaterThan, StrictGreaterThan, StrictLessThan, \
+ LessThan, Equality, Or, And, Lambda, Integer, Dummy, symbols
+from sympy.core.sympify import sympify, _sympify
+from sympy.functions.special.bessel import airybiprime
+from sympy.functions.special.error_functions import li
+from sympy.utilities.exceptions import sympy_deprecation_warning
def mathematica(s, additional_translations=None):
- '''
- Users can add their own translation dictionary.
- variable-length argument needs '*' character.
+ """
+ Translate a string containing a Wolfram Mathematica expression to a SymPy
+ expression.
+
+ If the translator is unable to find a suitable SymPy expression, the
+ ``FullForm`` of the Mathematica expression will be output, using SymPy
+ ``Function`` objects as nodes of the syntax tree.
Examples
========
>>> from sympy.parsing.mathematica import mathematica
- >>> mathematica('Log3[9]', {'Log3[x]':'log(x,3)'})
- 2
- >>> mathematica('F[7,5,3]', {'F[*x]':'Max(*x)*Min(*x)'})
+ >>> mathematica("Sin[x]^2 Tan[y]")
+ sin(x)**2*tan(y)
+ >>> e = mathematica("F[7,5,3]")
+ >>> e
+ F(7, 5, 3)
+ >>> from sympy import Function, Max, Min
+ >>> e.replace(Function("F"), lambda *x: Max(*x)*Min(*x))
21
- '''
-
+ Both standard input form and Mathematica full form are supported:
+
+ >>> mathematica("x*(a + b)")
+ x*(a + b)
+ >>> mathematica("Times[x, Plus[a, b]]")
+ x*(a + b)
+
+ To get a matrix from Wolfram's code:
+
+ >>> m = mathematica("{{a, b}, {c, d}}")
+ >>> m
+ ((a, b), (c, d))
+ >>> from sympy import Matrix
+ >>> Matrix(m)
+ Matrix([
+ [a, b],
+ [c, d]])
+
+ If the translation into equivalent SymPy expressions fails, an SymPy
+ expression equivalent to Wolfram Mathematica's "FullForm" will be created:
+
+ >>> mathematica("x_.")
+ Optional(Pattern(x, Blank()))
+ >>> mathematica("Plus @@ {x, y, z}")
+ Apply(Plus, (x, y, z))
+ >>> mathematica("f[x_, 3] := x^3 /; x > 0")
+ SetDelayed(f(Pattern(x, Blank()), 3), Condition(x**3, x > 0))
+ """
parser = MathematicaParser(additional_translations)
- return sympify(parser.parse(s))
+
+ if additional_translations is not None:
+ sympy_deprecation_warning(
+ """The ``additional_translations`` parameter for the Mathematica parser is now deprecated.
+Use SymPy's .replace( ) or .subs( ) methods on the output expression instead.""",
+ deprecated_since_version="1.11",
+ active_deprecations_target="mathematica-parser-additional-translations",
+ )
+ return sympify(parser._parse_old(s))
+
+ return parser.parse(s)
+
+
+def _parse_Function(*args):
+ if len(args) == 1:
+ arg = args[0]
+ Slot = Function("Slot")
+ slots = arg.atoms(Slot)
+ numbers = [a.args[0] for a in slots]
+ number_of_arguments = max(numbers)
+ if isinstance(number_of_arguments, Integer):
+ variables = symbols(f"dummy0:{number_of_arguments}", cls=Dummy)
+ return Lambda(variables, arg.xreplace({Slot(i+1): v for i, v in enumerate(variables)}))
+ return Lambda((), arg)
+ elif len(args) == 2:
+ variables = args[0]
+ body = args[1]
+ return Lambda(variables, body)
+ else:
+ raise SyntaxError("Function node expects 1 or 2 arguments")
def _deco(cls):
@@ -32,8 +105,24 @@ def _deco(cls):
@_deco
class MathematicaParser:
- '''An instance of this class converts a string of a basic Mathematica
- expression to SymPy style. Output is string type.'''
+ """
+ An instance of this class converts a string of a Wolfram Mathematica
+ expression to a SymPy expression.
+
+ The main parser acts internally in three stages:
+
+ 1. tokenizer: tokenizes the Mathematica expression and adds the missing *
+ operators. Handled by ``_from_mathematica_to_tokens(...)``
+ 2. full form list: sort the list of strings output by the tokenizer into a
+ syntax tree of nested lists and strings, equivalent to Mathematica's
+ ``FullForm`` expression output. This is handled by the function
+ ``_from_tokens_to_fullformlist(...)``.
+ 3. SymPy expression: the syntax tree expressed as full form list is visited
+ and the nodes with equivalent classes in SymPy are replaced. Unknown
+ syntax tree nodes are cast to SymPy ``Function`` objects. This is
+ handled by ``_from_fullformlist_to_sympy(...)``.
+
+ """
# left: Mathematica, right: SymPy
CORRESPONDENCES = {
@@ -82,18 +171,18 @@ class MathematicaParser:
# a single whitespace to '*'
'whitespace': (
re.compile(r'''
- (?<=[a-zA-Z\d]) # a letter or a number
- \ # a whitespace
- (?=[a-zA-Z\d]) # a letter or a number
+ (?:(?<=[a-zA-Z\d])|(?<=\d\.)) # a letter or a number
+ \s+ # any number of whitespaces
+ (?:(?=[a-zA-Z\d])|(?=\.\d)) # a letter or a number
''', re.VERBOSE),
'*'),
# add omitted '*' character
'add*_1': (
re.compile(r'''
- (?<=[])\d]) # ], ) or a number
- # ''
- (?=[(a-zA-Z]) # ( or a single letter
+ (?:(?<=[])\d])|(?<=\d\.)) # ], ) or a number
+ # ''
+ (?=[(a-zA-Z]) # ( or a single letter
''', re.VERBOSE),
'*'),
@@ -409,7 +498,7 @@ def _check_input(cls, s):
err = "Currently list is not supported."
raise ValueError(err)
- def parse(self, s):
+ def _parse_old(self, s):
# input check
self._check_input(s)
@@ -437,3 +526,528 @@ def parse(self, s):
# s = cls._replace(s, '}')
return s
+
+ def parse(self, s):
+ s2 = self._from_mathematica_to_tokens(s)
+ s3 = self._from_tokens_to_fullformlist(s2)
+ s4 = self._from_fullformlist_to_sympy(s3)
+ return s4
+
+ INFIX = "Infix"
+ PREFIX = "Prefix"
+ POSTFIX = "Postfix"
+ FLAT = "Flat"
+ RIGHT = "Right"
+ LEFT = "Left"
+
+ _mathematica_op_precedence: List[tTuple[str, Optional[str], tDict[str, tUnion[str, Callable]]]] = [
+ (POSTFIX, None, {";": lambda x: x + ["Null"] if isinstance(x, list) and x and x[0] == "CompoundExpression" else ["CompoundExpression", x, "Null"]}),
+ (INFIX, FLAT, {";": "CompoundExpression"}),
+ (INFIX, RIGHT, {"=": "Set", ":=": "SetDelayed", "+=": "AddTo", "-=": "SubtractFrom", "*=": "TimesBy", "/=": "DivideBy"}),
+ (INFIX, LEFT, {"//": lambda x, y: [x, y]}),
+ (POSTFIX, None, {"&": "Function"}),
+ (INFIX, LEFT, {"/.": "ReplaceAll"}),
+ (INFIX, RIGHT, {"->": "Rule", ":>": "RuleDelayed"}),
+ (INFIX, LEFT, {"/;": "Condition"}),
+ (INFIX, FLAT, {"|": "Alternatives"}),
+ (POSTFIX, None, {"..": "Repeated", "...": "RepeatedNull"}),
+ (INFIX, FLAT, {"||": "Or"}),
+ (INFIX, FLAT, {"&&": "And"}),
+ (PREFIX, None, {"!": "Not"}),
+ (INFIX, FLAT, {"===": "SameQ", "=!=": "UnsameQ"}),
+ (INFIX, FLAT, {"==": "Equal", "!=": "Unequal", "<=": "LessEqual", "<": "Less", ">=": "GreaterEqual", ">": "Greater"}),
+ (INFIX, None, {";;": "Span"}),
+ (INFIX, FLAT, {"+": "Plus", "-": "Plus"}),
+ (INFIX, FLAT, {"*": "Times", "/": "Times"}),
+ (INFIX, FLAT, {".": "Dot"}),
+ (PREFIX, None, {"-": lambda x: MathematicaParser._get_neg(x),
+ "+": lambda x: x}),
+ (INFIX, RIGHT, {"^": "Power"}),
+ (INFIX, RIGHT, {"@@": "Apply", "/@": "Map", "//@": "MapAll", "@@@": lambda x, y: ["Apply", x, y, ["List", "1"]]}),
+ (POSTFIX, None, {"'": "Derivative", "!": "Factorial", "!!": "Factorial2", "--": "Decrement"}),
+ (INFIX, None, {"[": lambda x, y: [x, *y], "[[": lambda x, y: ["Part", x, *y]}),
+ (PREFIX, None, {"{": lambda x: ["List", *x], "(": lambda x: x[0]}),
+ (INFIX, None, {"?": "PatternTest"}),
+ (POSTFIX, None, {
+ "_": lambda x: ["Pattern", x, ["Blank"]],
+ "_.": lambda x: ["Optional", ["Pattern", x, ["Blank"]]],
+ "__": lambda x: ["Pattern", x, ["BlankSequence"]],
+ "___": lambda x: ["Pattern", x, ["BlankNullSequence"]],
+ }),
+ (INFIX, None, {"_": lambda x, y: ["Pattern", x, ["Blank", y]]}),
+ (PREFIX, None, {"#": "Slot", "##": "SlotSequence"}),
+ ]
+
+ _missing_arguments_default = {
+ "#": lambda: ["Slot", "1"],
+ "##": lambda: ["SlotSequence", "1"],
+ }
+
+ _literal = r"[A-Za-z][A-Za-z0-9]*"
+ _number = r"(?:[0-9]+(?:\.[0-9]*)?|\.[0-9]+)"
+
+ _enclosure_open = ["(", "[", "[[", "{"]
+ _enclosure_close = [")", "]", "]]", "}"]
+
+ @classmethod
+ def _get_neg(cls, x):
+ return f"-{x}" if isinstance(x, str) and re.match(MathematicaParser._number, x) else ["Times", "-1", x]
+
+ @classmethod
+ def _get_inv(cls, x):
+ return ["Power", x, "-1"]
+
+ _regex_tokenizer = None
+
+ def _get_tokenizer(self):
+ if self._regex_tokenizer is not None:
+ # Check if the regular expression has already been compiled:
+ return self._regex_tokenizer
+ tokens = [self._literal, self._number]
+ tokens_escape = self._enclosure_open[:] + self._enclosure_close[:]
+ for typ, strat, symdict in self._mathematica_op_precedence:
+ for k in symdict:
+ tokens_escape.append(k)
+ tokens_escape.sort(key=lambda x: -len(x))
+ tokens.extend(map(re.escape, tokens_escape))
+ tokens.append(",")
+ tokens.append("\n")
+ tokenizer = re.compile("(" + "|".join(tokens) + ")")
+ self._regex_tokenizer = tokenizer
+ return self._regex_tokenizer
+
+ def _from_mathematica_to_tokens(self, code: str):
+ tokenizer = self._get_tokenizer()
+
+ # Remove comments:
+ while True:
+ pos_comment_start = code.find("(*")
+ if pos_comment_start == -1:
+ break
+ pos_comment_end = code.find("*)")
+ if pos_comment_end == -1 or pos_comment_end < pos_comment_start:
+ raise SyntaxError("mismatch in comment (* *) code")
+ code = code[:pos_comment_start] + code[pos_comment_end+2:]
+
+ tokens = tokenizer.findall(code)
+
+ # Remove newlines at the beginning
+ while tokens and tokens[0] == "\n":
+ tokens.pop(0)
+ # Remove newlines at the end
+ while tokens and tokens[-1] == "\n":
+ tokens.pop(-1)
+
+ return tokens
+
+ def _is_op(self, token: tUnion[str, list]) -> bool:
+ if isinstance(token, list):
+ return False
+ if re.match(self._literal, token):
+ return False
+ if re.match("-?" + self._number, token):
+ return False
+ return True
+
+ def _is_valid_star1(self, token: tUnion[str, list]) -> bool:
+ if token in (")", "}"):
+ return True
+ return not self._is_op(token)
+
+ def _is_valid_star2(self, token: tUnion[str, list]) -> bool:
+ if token in ("(", "{"):
+ return True
+ return not self._is_op(token)
+
+ def _from_tokens_to_fullformlist(self, tokens: list):
+ stack: List[list] = [[]]
+ open_seq = []
+ pointer: int = 0
+ while pointer < len(tokens):
+ token = tokens[pointer]
+ if token in self._enclosure_open:
+ stack[-1].append(token)
+ open_seq.append(token)
+ stack.append([])
+ elif token == ",":
+ if len(stack[-1]) == 0 and stack[-2][-1] == open_seq[-1]:
+ raise SyntaxError("%s cannot be followed by comma ," % open_seq[-1])
+ stack[-1] = self._parse_after_braces(stack[-1])
+ stack.append([])
+ elif token in self._enclosure_close:
+ ind = self._enclosure_close.index(token)
+ if self._enclosure_open[ind] != open_seq[-1]:
+ unmatched_enclosure = SyntaxError("unmatched enclosure")
+ if token == "]]" and open_seq[-1] == "[":
+ if open_seq[-2] == "[":
+ # These two lines would be logically correct, but are
+ # unnecessary:
+ # token = "]"
+ # tokens[pointer] = "]"
+ tokens.insert(pointer+1, "]")
+ elif open_seq[-2] == "[[":
+ if tokens[pointer+1] == "]":
+ tokens[pointer+1] = "]]"
+ elif tokens[pointer+1] == "]]":
+ tokens[pointer+1] = "]]"
+ tokens.insert(pointer+2, "]")
+ else:
+ raise unmatched_enclosure
+ else:
+ raise unmatched_enclosure
+ if len(stack[-1]) == 0 and stack[-2][-1] == "(":
+ raise SyntaxError("( ) not valid syntax")
+ last_stack = self._parse_after_braces(stack[-1], True)
+ stack[-1] = last_stack
+ new_stack_element = []
+ while stack[-1][-1] != open_seq[-1]:
+ new_stack_element.append(stack.pop())
+ new_stack_element.reverse()
+ if open_seq[-1] == "(" and len(new_stack_element) != 1:
+ raise SyntaxError("( must be followed by one expression, %i detected" % len(new_stack_element))
+ stack[-1].append(new_stack_element)
+ open_seq.pop(-1)
+ else:
+ stack[-1].append(token)
+ pointer += 1
+ assert len(stack) == 1
+ return self._parse_after_braces(stack[0])
+
+ def _util_remove_newlines(self, lines: list, tokens: list, inside_enclosure: bool):
+ pointer = 0
+ size = len(tokens)
+ while pointer < size:
+ token = tokens[pointer]
+ if token == "\n":
+ if inside_enclosure:
+ # Ignore newlines inside enclosures
+ tokens.pop(pointer)
+ size -= 1
+ continue
+ if pointer == 0:
+ tokens.pop(0)
+ size -= 1
+ continue
+ if pointer > 1:
+ try:
+ prev_expr = self._parse_after_braces(tokens[:pointer], inside_enclosure)
+ except SyntaxError:
+ tokens.pop(pointer)
+ size -= 1
+ continue
+ else:
+ prev_expr = tokens[0]
+ if len(prev_expr) > 0 and prev_expr[0] == "CompoundExpression":
+ lines.extend(prev_expr[1:])
+ else:
+ lines.append(prev_expr)
+ for i in range(pointer):
+ tokens.pop(0)
+ size -= pointer
+ pointer = 0
+ continue
+ pointer += 1
+
+ def _util_add_missing_asterisks(self, tokens: list):
+ size: int = len(tokens)
+ pointer: int = 0
+ while pointer < size:
+ if (pointer > 0 and
+ self._is_valid_star1(tokens[pointer - 1]) and
+ self._is_valid_star2(tokens[pointer])):
+ # This is a trick to add missing * operators in the expression,
+ # `"*" in op_dict` makes sure the precedence level is the same as "*",
+ # while `not self._is_op( ... )` makes sure this and the previous
+ # expression are not operators.
+ if tokens[pointer] == "(":
+ # ( has already been processed by now, replace:
+ tokens[pointer] = "*"
+ tokens[pointer + 1] = tokens[pointer + 1][0]
+ else:
+ tokens.insert(pointer, "*")
+ pointer += 1
+ size += 1
+ pointer += 1
+
+ def _parse_after_braces(self, tokens: list, inside_enclosure: bool = False):
+ op_dict: dict
+ changed: bool = False
+ lines: list = []
+
+ self._util_remove_newlines(lines, tokens, inside_enclosure)
+
+ for op_type, grouping_strat, op_dict in reversed(self._mathematica_op_precedence):
+ if "*" in op_dict:
+ self._util_add_missing_asterisks(tokens)
+ size: int = len(tokens)
+ pointer: int = 0
+ while pointer < size:
+ token = tokens[pointer]
+ if isinstance(token, str) and token in op_dict:
+ op_name: tUnion[str, Callable] = op_dict[token]
+ node: list
+ first_index: int
+ if isinstance(op_name, str):
+ node = [op_name]
+ first_index = 1
+ else:
+ node = []
+ first_index = 0
+ if token in ("+", "-") and op_type == self.PREFIX and pointer > 0 and not self._is_op(tokens[pointer - 1]):
+ # Make sure that PREFIX + - don't match expressions like a + b or a - b,
+ # the INFIX + - are supposed to match that expression:
+ pointer += 1
+ continue
+ if op_type == self.INFIX:
+ if pointer == 0 or pointer == size - 1 or self._is_op(tokens[pointer - 1]) or self._is_op(tokens[pointer + 1]):
+ pointer += 1
+ continue
+ changed = True
+ tokens[pointer] = node
+ if op_type == self.INFIX:
+ arg1 = tokens.pop(pointer-1)
+ arg2 = tokens.pop(pointer)
+ if token == "/":
+ arg2 = self._get_inv(arg2)
+ elif token == "-":
+ arg2 = self._get_neg(arg2)
+ pointer -= 1
+ size -= 2
+ node.append(arg1)
+ node_p = node
+ if grouping_strat == self.FLAT:
+ while pointer + 2 < size and self._check_op_compatible(tokens[pointer+1], token):
+ node_p.append(arg2)
+ other_op = tokens.pop(pointer+1)
+ arg2 = tokens.pop(pointer+1)
+ if other_op == "/":
+ arg2 = self._get_inv(arg2)
+ elif other_op == "-":
+ arg2 = self._get_neg(arg2)
+ size -= 2
+ node_p.append(arg2)
+ elif grouping_strat == self.RIGHT:
+ while pointer + 2 < size and tokens[pointer+1] == token:
+ node_p.append([op_name, arg2])
+ node_p = node_p[-1]
+ tokens.pop(pointer+1)
+ arg2 = tokens.pop(pointer+1)
+ size -= 2
+ node_p.append(arg2)
+ elif grouping_strat == self.LEFT:
+ while pointer + 1 < size and tokens[pointer+1] == token:
+ if isinstance(op_name, str):
+ node_p[first_index] = [op_name, node_p[first_index], arg2]
+ else:
+ node_p[first_index] = op_name(node_p[first_index], arg2)
+ tokens.pop(pointer+1)
+ arg2 = tokens.pop(pointer+1)
+ size -= 2
+ node_p.append(arg2)
+ else:
+ node.append(arg2)
+ elif op_type == self.PREFIX:
+ assert grouping_strat is None
+ if pointer == size - 1 or self._is_op(tokens[pointer + 1]):
+ tokens[pointer] = self._missing_arguments_default[token]()
+ else:
+ node.append(tokens.pop(pointer+1))
+ size -= 1
+ elif op_type == self.POSTFIX:
+ assert grouping_strat is None
+ if pointer == 0 or self._is_op(tokens[pointer - 1]):
+ tokens[pointer] = self._missing_arguments_default[token]()
+ else:
+ node.append(tokens.pop(pointer-1))
+ pointer -= 1
+ size -= 1
+ if isinstance(op_name, Callable): # type: ignore
+ op_call: Callable = typing.cast(Callable, op_name)
+ new_node = op_call(*node)
+ node.clear()
+ if isinstance(new_node, list):
+ node.extend(new_node)
+ else:
+ tokens[pointer] = new_node
+ pointer += 1
+ if len(tokens) > 1 or (len(lines) == 0 and len(tokens) == 0):
+ if changed:
+ # Trick to deal with cases in which an operator with lower
+ # precedence should be transformed before an operator of higher
+ # precedence. Such as in the case of `#&[x]` (that is
+ # equivalent to `Lambda(d_, d_)(x)` in SymPy). In this case the
+ # operator `&` has lower precedence than `[`, but needs to be
+ # evaluated first because otherwise `# (&[x])` is not a valid
+ # expression:
+ return self._parse_after_braces(tokens, inside_enclosure)
+ raise SyntaxError("unable to create a single AST for the expression")
+ if len(lines) > 0:
+ if tokens[0] and tokens[0][0] == "CompoundExpression":
+ tokens = tokens[0][1:]
+ compound_expression = ["CompoundExpression", *lines, *tokens]
+ return compound_expression
+ return tokens[0]
+
+ def _check_op_compatible(self, op1: str, op2: str):
+ if op1 == op2:
+ return True
+ muldiv = {"*", "/"}
+ addsub = {"+", "-"}
+ if op1 in muldiv and op2 in muldiv:
+ return True
+ if op1 in addsub and op2 in addsub:
+ return True
+ return False
+
+ def _from_fullform_to_fullformlist(self, wmexpr: str):
+ """
+ Parses FullForm[Downvalues[]] generated by Mathematica
+ """
+ out: list = []
+ stack = [out]
+ generator = re.finditer(r'[\[\],]', wmexpr)
+ last_pos = 0
+ for match in generator:
+ if match is None:
+ break
+ position = match.start()
+ last_expr = wmexpr[last_pos:position].replace(',', '').replace(']', '').replace('[', '').strip()
+
+ if match.group() == ',':
+ if last_expr != '':
+ stack[-1].append(last_expr)
+ elif match.group() == ']':
+ if last_expr != '':
+ stack[-1].append(last_expr)
+ stack.pop()
+ elif match.group() == '[':
+ stack[-1].append([last_expr])
+ stack.append(stack[-1][-1])
+ last_pos = match.end()
+ return out[0]
+
+ def _from_fullformlist_to_fullformsympy(self, pylist: list):
+ from sympy import Function, Symbol
+
+ def converter(expr):
+ if isinstance(expr, list):
+ if len(expr) > 0:
+ head = expr[0]
+ args = [converter(arg) for arg in expr[1:]]
+ return Function(head)(*args)
+ else:
+ raise ValueError("error")
+ elif isinstance(expr, str):
+ return Symbol(expr)
+ else:
+ return _sympify(expr)
+
+ return converter(pylist)
+
+ _node_conversions = dict(
+ Times=Mul,
+ Plus=Add,
+ Power=Pow,
+ Log=lambda *a: log(*reversed(a)),
+ Log2=lambda x: log(x, 2),
+ Log10=lambda x: log(x, 10),
+ Exp=exp,
+ Sqrt=sqrt,
+
+ Sin=sin,
+ Cos=cos,
+ Tan=tan,
+ Cot=cot,
+ Sec=sec,
+ Csc=csc,
+
+ ArcSin=asin,
+ ArcCos=acos,
+ ArcTan=lambda *a: atan2(*reversed(a)) if len(a) == 2 else atan(*a),
+ ArcCot=acot,
+ ArcSec=asec,
+ ArcCsc=acsc,
+
+ Sinh=sinh,
+ Cosh=cosh,
+ Tanh=tanh,
+ Coth=coth,
+ Sech=sech,
+ Csch=csch,
+
+ ArcSinh=asinh,
+ ArcCosh=acosh,
+ ArcTanh=atanh,
+ ArcCoth=acoth,
+ ArcSech=asech,
+ ArcCsch=acsch,
+
+ Expand=expand,
+ Im=im,
+ Re=sympy.re,
+ Flatten=flatten,
+ Polylog=polylog,
+ Cancel=cancel,
+ # Gamma=gamma,
+ TrigExpand=expand_trig,
+ Sign=sign,
+ Simplify=simplify,
+ Defer=UnevaluatedExpr,
+ Identity=S,
+ # Sum=Sum_doit,
+ # Module=With,
+ # Block=With,
+ Null=lambda *a: S.Zero,
+ Mod=Mod,
+ Max=Max,
+ Min=Min,
+ Pochhammer=rf,
+ ExpIntegralEi=Ei,
+ SinIntegral=Si,
+ CosIntegral=Ci,
+ AiryAi=airyai,
+ AiryAiPrime=airyaiprime,
+ AiryBi=airybi,
+ AiryBiPrime=airybiprime,
+ LogIntegral=li,
+ PrimePi=primepi,
+ Prime=prime,
+ PrimeQ=isprime,
+
+ List=Tuple,
+ Greater=StrictGreaterThan,
+ GreaterEqual=GreaterThan,
+ Less=StrictLessThan,
+ LessEqual=LessThan,
+ Equal=Equality,
+ Or=Or,
+ And=And,
+
+ Function=_parse_Function,
+ )
+
+ _atom_conversions = {
+ "I": I,
+ "Pi": pi,
+ }
+
+ def _from_fullformlist_to_sympy(self, full_form_list):
+
+ def recurse(expr):
+ if isinstance(expr, list):
+ if isinstance(expr[0], list):
+ head = recurse(expr[0])
+ else:
+ head = self._node_conversions.get(expr[0], Function(expr[0]))
+ return head(*list(recurse(arg) for arg in expr[1:]))
+ else:
+ return self._atom_conversions.get(expr, sympify(expr))
+
+ return recurse(full_form_list)
+
+ def _from_fullformsympy_to_sympy(self, mform):
+
+ expr = mform
+ for mma_form, sympy_node in self._node_conversions.items():
+ expr = expr.replace(Function(mma_form), sympy_node)
+ return expr
| diff --git a/sympy/parsing/tests/test_mathematica.py b/sympy/parsing/tests/test_mathematica.py
index c6ad496d9491..8f48d93db7fc 100644
--- a/sympy/parsing/tests/test_mathematica.py
+++ b/sympy/parsing/tests/test_mathematica.py
@@ -1,5 +1,8 @@
-from sympy.parsing.mathematica import mathematica
+from sympy import sin, Function, symbols, Dummy, Lambda, cos
+from sympy.parsing.mathematica import mathematica, MathematicaParser
from sympy.core.sympify import sympify
+from sympy.abc import n, w, x, y, z
+from sympy.testing.pytest import raises
def test_mathematica():
@@ -16,7 +19,7 @@ def test_mathematica():
'Exp[Log[4]]': 'exp(log(4))',
'(x+1)(x+3)': '(x+1)*(x+3)',
'Cos[ArcCos[3.6]]': 'cos(acos(3.6))',
- 'Cos[x]==Sin[y]': 'cos(x)==sin(y)',
+ 'Cos[x]==Sin[y]': 'Eq(cos(x), sin(y))',
'2*Sin[x+y]': '2*sin(x+y)',
'Sin[x]+Cos[y]': 'sin(x)+cos(y)',
'Sin[Cos[x]]': 'sin(cos(x))',
@@ -31,12 +34,16 @@ def test_mathematica():
'(x+1)y': '(x+1)*y',
'x(y+1)': 'x*(y+1)',
'Sin[x]Cos[y]': 'sin(x)*cos(y)',
- 'Sin[x]**2Cos[y]**2': 'sin(x)**2*cos(y)**2',
+ 'Sin[x]^2Cos[y]^2': 'sin(x)**2*cos(y)**2',
'Cos[x]^2(1 - Cos[y]^2)': 'cos(x)**2*(1-cos(y)**2)',
'x y': 'x*y',
+ 'x y': 'x*y',
'2 x': '2*x',
'x 8': 'x*8',
'2 8': '2*8',
+ '4.x': '4.*x',
+ '4. 3': '4.*3',
+ '4. 3.': '4.*3.',
'1 2 3': '1*2*3',
' - 2 * Sqrt[ 2 3 * ( 1 + 5 ) ] ': '-2*sqrt(2*3*(1+5))',
'Log[2,4]': 'log(4,2)',
@@ -54,9 +61,9 @@ def test_mathematica():
'CosIntegral[x]': 'Ci(x)',
'AiryAi[x]': 'airyai(x)',
'AiryAiPrime[5]': 'airyaiprime(5)',
- 'AiryBi[x]' :'airybi(x)',
- 'AiryBiPrime[7]' :'airybiprime(7)',
- 'LogIntegral[4]':' li(4)',
+ 'AiryBi[x]': 'airybi(x)',
+ 'AiryBiPrime[7]': 'airybiprime(7)',
+ 'LogIntegral[4]': ' li(4)',
'PrimePi[7]': 'primepi(7)',
'Prime[5]': 'prime(5)',
'PrimeQ[5]': 'isprime(5)'
@@ -64,3 +71,192 @@ def test_mathematica():
for e in d:
assert mathematica(e) == sympify(d[e])
+
+ # The parsed form of this expression should not evaluate the Lambda object:
+ assert mathematica("Sin[#]^2 + Cos[#]^2 &[x]") == sin(x)**2 + cos(x)**2
+
+ d1, d2, d3 = symbols("d1:4", cls=Dummy)
+ assert mathematica("Sin[#] + Cos[#3] &").dummy_eq(Lambda((d1, d2, d3), sin(d1) + cos(d3)))
+ assert mathematica("Sin[#^2] &").dummy_eq(Lambda(d1, sin(d1**2)))
+ assert mathematica("Function[x, x^3]") == Lambda(x, x**3)
+ assert mathematica("Function[{x, y}, x^2 + y^2]") == Lambda((x, y), x**2+y**2)
+
+
+def test_parser_mathematica_tokenizer():
+ parser = MathematicaParser()
+
+ chain = lambda expr: parser._from_tokens_to_fullformlist(parser._from_mathematica_to_tokens(expr))
+
+ # Basic patterns
+ assert chain("x") == "x"
+ assert chain("42") == "42"
+ assert chain(".2") == ".2"
+ assert chain("+x") == "x"
+ assert chain("-1") == "-1"
+ assert chain("- 3") == "-3"
+ assert chain("+Sin[x]") == ["Sin", "x"]
+ assert chain("-Sin[x]") == ["Times", "-1", ["Sin", "x"]]
+ assert chain("x(a+1)") == ["Times", "x", ["Plus", "a", "1"]]
+ assert chain("(x)") == "x"
+ assert chain("(+x)") == "x"
+ assert chain("-a") == ["Times", "-1", "a"]
+ assert chain("(-x)") == ["Times", "-1", "x"]
+ assert chain("(x + y)") == ["Plus", "x", "y"]
+ assert chain("3 + 4") == ["Plus", "3", "4"]
+ assert chain("a - 3") == ["Plus", "a", "-3"]
+ assert chain("a - b") == ["Plus", "a", ["Times", "-1", "b"]]
+ assert chain("7 * 8") == ["Times", "7", "8"]
+ assert chain("a + b*c") == ["Plus", "a", ["Times", "b", "c"]]
+ assert chain("a + b* c* d + 2 * e") == ["Plus", "a", ["Times", "b", "c", "d"], ["Times", "2", "e"]]
+ assert chain("a / b") == ["Times", "a", ["Power", "b", "-1"]]
+
+ # Missing asterisk (*) patterns:
+ assert chain("x y") == ["Times", "x", "y"]
+ assert chain("3 4") == ["Times", "3", "4"]
+ assert chain("a[b] c") == ["Times", ["a", "b"], "c"]
+ assert chain("(x) (y)") == ["Times", "x", "y"]
+ assert chain("3 (a)") == ["Times", "3", "a"]
+ assert chain("(a) b") == ["Times", "a", "b"]
+ assert chain("4.2") == "4.2"
+ assert chain("4 2") == ["Times", "4", "2"]
+ assert chain("4 2") == ["Times", "4", "2"]
+ assert chain("3 . 4") == ["Dot", "3", "4"]
+ assert chain("4. 2") == ["Times", "4.", "2"]
+ assert chain("x.y") == ["Dot", "x", "y"]
+ assert chain("4.y") == ["Times", "4.", "y"]
+ assert chain("4 .y") == ["Dot", "4", "y"]
+ assert chain("x.4") == ["Times", "x", ".4"]
+ assert chain("x0.3") == ["Times", "x0", ".3"]
+ assert chain("x. 4") == ["Dot", "x", "4"]
+
+ # Comments
+ assert chain("a (* +b *) + c") == ["Plus", "a", "c"]
+ assert chain("a (* + b *) + (**)c (* +d *) + e") == ["Plus", "a", "c", "e"]
+ assert chain("""a + (*
+ + b
+ *) c + (* d
+ *) e
+ """) == ["Plus", "a", "c", "e"]
+
+ # Operators couples + and -, * and / are mutually associative:
+ # (i.e. expression gets flattened when mixing these operators)
+ assert chain("a*b/c") == ["Times", "a", "b", ["Power", "c", "-1"]]
+ assert chain("a/b*c") == ["Times", "a", ["Power", "b", "-1"], "c"]
+ assert chain("a+b-c") == ["Plus", "a", "b", ["Times", "-1", "c"]]
+ assert chain("a-b+c") == ["Plus", "a", ["Times", "-1", "b"], "c"]
+ assert chain("-a + b -c ") == ["Plus", ["Times", "-1", "a"], "b", ["Times", "-1", "c"]]
+ assert chain("a/b/c*d") == ["Times", "a", ["Power", "b", "-1"], ["Power", "c", "-1"], "d"]
+ assert chain("a/b/c") == ["Times", "a", ["Power", "b", "-1"], ["Power", "c", "-1"]]
+ assert chain("a-b-c") == ["Plus", "a", ["Times", "-1", "b"], ["Times", "-1", "c"]]
+ assert chain("1/a") == ["Times", "1", ["Power", "a", "-1"]]
+ assert chain("1/a/b") == ["Times", "1", ["Power", "a", "-1"], ["Power", "b", "-1"]]
+ assert chain("-1/a*b") == ["Times", "-1", ["Power", "a", "-1"], "b"]
+
+ # Enclosures of various kinds, i.e. ( ) [ ] [[ ]] { }
+ assert chain("(a + b) + c") == ["Plus", ["Plus", "a", "b"], "c"]
+ assert chain(" a + (b + c) + d ") == ["Plus", "a", ["Plus", "b", "c"], "d"]
+ assert chain("a * (b + c)") == ["Times", "a", ["Plus", "b", "c"]]
+ assert chain("a b (c d)") == ["Times", "a", "b", ["Times", "c", "d"]]
+ assert chain("{a, b, 2, c}") == ["List", "a", "b", "2", "c"]
+ assert chain("{a, {b, c}}") == ["List", "a", ["List", "b", "c"]]
+ assert chain("{{a}}") == ["List", ["List", "a"]]
+ assert chain("a[b, c]") == ["a", "b", "c"]
+ assert chain("a[[b, c]]") == ["Part", "a", "b", "c"]
+ assert chain("a[b[c]]") == ["a", ["b", "c"]]
+ assert chain("a[[b, c[[d, {e,f}]]]]") == ["Part", "a", "b", ["Part", "c", "d", ["List", "e", "f"]]]
+ assert chain("a[b[[c,d]]]") == ["a", ["Part", "b", "c", "d"]]
+ assert chain("a[[b[c]]]") == ["Part", "a", ["b", "c"]]
+ assert chain("a[[b[[c]]]]") == ["Part", "a", ["Part", "b", "c"]]
+ assert chain("a[[b[c[[d]]]]]") == ["Part", "a", ["b", ["Part", "c", "d"]]]
+ assert chain("a[b[[c[d]]]]") == ["a", ["Part", "b", ["c", "d"]]]
+ assert chain("x[[a+1, b+2, c+3]]") == ["Part", "x", ["Plus", "a", "1"], ["Plus", "b", "2"], ["Plus", "c", "3"]]
+ assert chain("x[a+1, b+2, c+3]") == ["x", ["Plus", "a", "1"], ["Plus", "b", "2"], ["Plus", "c", "3"]]
+ assert chain("{a+1, b+2, c+3}") == ["List", ["Plus", "a", "1"], ["Plus", "b", "2"], ["Plus", "c", "3"]]
+
+ # Flat operator:
+ assert chain("a*b*c*d*e") == ["Times", "a", "b", "c", "d", "e"]
+ assert chain("a +b + c+ d+e") == ["Plus", "a", "b", "c", "d", "e"]
+
+ # Right priority operator:
+ assert chain("a^b") == ["Power", "a", "b"]
+ assert chain("a^b^c") == ["Power", "a", ["Power", "b", "c"]]
+ assert chain("a^b^c^d") == ["Power", "a", ["Power", "b", ["Power", "c", "d"]]]
+
+ # Left priority operator:
+ assert chain("a/.b") == ["ReplaceAll", "a", "b"]
+ assert chain("a/.b/.c/.d") == ["ReplaceAll", ["ReplaceAll", ["ReplaceAll", "a", "b"], "c"], "d"]
+
+ assert chain("a//b") == ["a", "b"]
+ assert chain("a//b//c") == [["a", "b"], "c"]
+ assert chain("a//b//c//d") == [[["a", "b"], "c"], "d"]
+
+ # Compound expressions
+ assert chain("a;b") == ["CompoundExpression", "a", "b"]
+ assert chain("a;") == ["CompoundExpression", "a", "Null"]
+ assert chain("a;b;") == ["CompoundExpression", "a", "b", "Null"]
+ assert chain("a[b;c]") == ["a", ["CompoundExpression", "b", "c"]]
+ assert chain("a[b,c;d,e]") == ["a", "b", ["CompoundExpression", "c", "d"], "e"]
+ assert chain("a[b,c;,d]") == ["a", "b", ["CompoundExpression", "c", "Null"], "d"]
+
+ # New lines
+ assert chain("a\nb\n") == ["CompoundExpression", "a", "b"]
+ assert chain("a\n\nb\n (c \nd) \n") == ["CompoundExpression", "a", "b", ["Times", "c", "d"]]
+ assert chain("\na; b\nc") == ["CompoundExpression", "a", "b", "c"]
+ assert chain("a + \nb\n") == ["Plus", "a", "b"]
+ assert chain("a\nb; c; d\n e; (f \n g); h + \n i") == ["CompoundExpression", "a", "b", "c", "d", "e", ["Times", "f", "g"], ["Plus", "h", "i"]]
+ assert chain("\n{\na\nb; c; d\n e (f \n g); h + \n i\n\n}\n") == ["List", ["CompoundExpression", ["Times", "a", "b"], "c", ["Times", "d", "e", ["Times", "f", "g"]], ["Plus", "h", "i"]]]
+
+ # Patterns
+ assert chain("y_") == ["Pattern", "y", ["Blank"]]
+ assert chain("y_.") == ["Optional", ["Pattern", "y", ["Blank"]]]
+ assert chain("y__") == ["Pattern", "y", ["BlankSequence"]]
+ assert chain("y___") == ["Pattern", "y", ["BlankNullSequence"]]
+ assert chain("a[b_.,c_]") == ["a", ["Optional", ["Pattern", "b", ["Blank"]]], ["Pattern", "c", ["Blank"]]]
+ assert chain("b_. c") == ["Times", ["Optional", ["Pattern", "b", ["Blank"]]], "c"]
+
+ # Slots for lambda functions
+ assert chain("#") == ["Slot", "1"]
+ assert chain("#3") == ["Slot", "3"]
+ assert chain("#n") == ["Slot", "n"]
+ assert chain("##") == ["SlotSequence", "1"]
+ assert chain("##a") == ["SlotSequence", "a"]
+
+ # Lambda functions
+ assert chain("x&") == ["Function", "x"]
+ assert chain("#&") == ["Function", ["Slot", "1"]]
+ assert chain("#+3&") == ["Function", ["Plus", ["Slot", "1"], "3"]]
+ assert chain("#1 + #2&") == ["Function", ["Plus", ["Slot", "1"], ["Slot", "2"]]]
+ assert chain("# + #&") == ["Function", ["Plus", ["Slot", "1"], ["Slot", "1"]]]
+ assert chain("#&[x]") == [["Function", ["Slot", "1"]], "x"]
+ assert chain("#1 + #2 & [x, y]") == [["Function", ["Plus", ["Slot", "1"], ["Slot", "2"]]], "x", "y"]
+ assert chain("#1^2#2^3&") == ["Function", ["Times", ["Power", ["Slot", "1"], "2"], ["Power", ["Slot", "2"], "3"]]]
+
+ # Invalid expressions:
+ raises(SyntaxError, lambda: chain("(,"))
+ raises(SyntaxError, lambda: chain("()"))
+ raises(SyntaxError, lambda: chain("a (* b"))
+
+
+def test_parser_mathematica_exp_alt():
+ parser = MathematicaParser()
+
+ convert_chain2 = lambda expr: parser._from_fullformlist_to_fullformsympy(parser._from_fullform_to_fullformlist(expr))
+ convert_chain3 = lambda expr: parser._from_fullformsympy_to_sympy(convert_chain2(expr))
+
+ Sin, Times, Plus, Power = symbols("Sin Times Plus Power", cls=Function)
+
+ full_form1 = "Sin[Times[x, y]]"
+ full_form2 = "Plus[Times[x, y], z]"
+ full_form3 = "Sin[Times[x, Plus[y, z], Power[w, n]]]]"
+
+ assert parser._from_fullform_to_fullformlist(full_form1) == ["Sin", ["Times", "x", "y"]]
+ assert parser._from_fullform_to_fullformlist(full_form2) == ["Plus", ["Times", "x", "y"], "z"]
+ assert parser._from_fullform_to_fullformlist(full_form3) == ["Sin", ["Times", "x", ["Plus", "y", "z"], ["Power", "w", "n"]]]
+
+ assert convert_chain2(full_form1) == Sin(Times(x, y))
+ assert convert_chain2(full_form2) == Plus(Times(x, y), z)
+ assert convert_chain2(full_form3) == Sin(Times(x, Plus(y, z), Power(w, n)))
+
+ assert convert_chain3(full_form1) == sin(x*y)
+ assert convert_chain3(full_form2) == x*y + z
+ assert convert_chain3(full_form3) == sin(x*(y + z)*w**n)
| diff --git a/doc/src/explanation/active-deprecations.md b/doc/src/explanation/active-deprecations.md
index fa0328c0fdff..84fda0eb75f9 100644
--- a/doc/src/explanation/active-deprecations.md
+++ b/doc/src/explanation/active-deprecations.md
@@ -74,6 +74,37 @@ will need to either add a `warnings` filter as above or use pytest to filter
SymPy deprecation warnings.
```
+## Version 1.11
+
+(mathematica-parser-additional-translations)=
+### Mathematica parser: removed ``additional_translations`` parameter
+
+The ``additional_translations`` parameter for the Mathematica parser is now deprecated.
+Additional translation rules to convert Mathematica expressions into SymPy ones
+should be specified after the conversion using SymPy's .replace( ) or .subs( )
+methods on the output expression. If the translator fails to recognize the logical
+meaning of a Mathematica expression, a form similar to Mathematica's full form
+will be returned, using SymPy's ``Function`` object to encode the nodes of the
+syntax tree.
+
+For example, suppose you want ``F`` to be a function that returns the maximum
+value multiplied by the minimum value, the previous way to
+specify this conversion was:
+
+```py
+>>> from sympy.parsing.mathematica import mathematica
+>>> mathematica('F[7,5,3]', {'F[*x]':'Max(*x)*Min(*x)'})
+21
+```
+
+Now you can do the same with
+
+```py
+>>> from sympy import Function, Max, Min
+>>> mathematica("F[7,5,3]").replace(Function("F"), lambda *x: Max(*x)*Min(*x))
+21
+```
+
## Version 1.10
(deprecated-traversal-functions-moved)=
| [
{
"components": [
{
"doc": "",
"lines": [
82,
98
],
"name": "_parse_Function",
"signature": "def _parse_Function(*args):",
"type": "function"
},
{
"doc": "",
"lines": [
501,
528
],
... | [
"test_mathematica",
"test_parser_mathematica_tokenizer"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Rewritten Wolfram Mathematica language parser from scratch
New Wolfram Mathematica language parser.
In theory, it should be able to parse everything.
<!-- BEGIN RELEASE NOTES -->
* parsing
* New powerful Mathematica parser replacing the old one. Language support greatly improved.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/parsing/mathematica.py]
(definition of _parse_Function:)
def _parse_Function(*args):
(definition of MathematicaParser._parse_old:)
def _parse_old(self, s):
(definition of MathematicaParser._get_neg:)
def _get_neg(cls, x):
(definition of MathematicaParser._get_inv:)
def _get_inv(cls, x):
(definition of MathematicaParser._get_tokenizer:)
def _get_tokenizer(self):
(definition of MathematicaParser._from_mathematica_to_tokens:)
def _from_mathematica_to_tokens(self, code: str):
(definition of MathematicaParser._is_op:)
def _is_op(self, token: tUnion[str, list]) -> bool:
(definition of MathematicaParser._is_valid_star1:)
def _is_valid_star1(self, token: tUnion[str, list]) -> bool:
(definition of MathematicaParser._is_valid_star2:)
def _is_valid_star2(self, token: tUnion[str, list]) -> bool:
(definition of MathematicaParser._from_tokens_to_fullformlist:)
def _from_tokens_to_fullformlist(self, tokens: list):
(definition of MathematicaParser._util_remove_newlines:)
def _util_remove_newlines(self, lines: list, tokens: list, inside_enclosure: bool):
(definition of MathematicaParser._util_add_missing_asterisks:)
def _util_add_missing_asterisks(self, tokens: list):
(definition of MathematicaParser._parse_after_braces:)
def _parse_after_braces(self, tokens: list, inside_enclosure: bool = False):
(definition of MathematicaParser._check_op_compatible:)
def _check_op_compatible(self, op1: str, op2: str):
(definition of MathematicaParser._from_fullform_to_fullformlist:)
def _from_fullform_to_fullformlist(self, wmexpr: str):
"""Parses FullForm[Downvalues[]] generated by Mathematica"""
(definition of MathematicaParser._from_fullformlist_to_fullformsympy:)
def _from_fullformlist_to_fullformsympy(self, pylist: list):
(definition of MathematicaParser._from_fullformlist_to_fullformsympy.converter:)
def converter(expr):
(definition of MathematicaParser._from_fullformlist_to_sympy:)
def _from_fullformlist_to_sympy(self, full_form_list):
(definition of MathematicaParser._from_fullformlist_to_sympy.recurse:)
def recurse(expr):
(definition of MathematicaParser._from_fullformsympy_to_sympy:)
def _from_fullformsympy_to_sympy(self, mform):
[end of new definitions in sympy/parsing/mathematica.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | edf24253833ca153cb6d29ae54092ecebe29614c | |
boto__botocore-2606 | 2,606 | boto/botocore | null | 6c6ca961d0ba537b8f5cb8ea004a69b9b5dea10f | 2022-02-03T22:56:45Z | diff --git a/.changes/next-release/enhancement-RequestHeader-82118.json b/.changes/next-release/enhancement-RequestHeader-82118.json
new file mode 100644
index 0000000000..f6641fedaf
--- /dev/null
+++ b/.changes/next-release/enhancement-RequestHeader-82118.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "Lambda Request Header",
+ "description": "Adding request header for Lambda recursion detection."
+}
diff --git a/botocore/handlers.py b/botocore/handlers.py
index 17b1df3a30..466d09d03d 100644
--- a/botocore/handlers.py
+++ b/botocore/handlers.py
@@ -16,6 +16,7 @@
This module contains builtin handlers for events emitted by botocore.
"""
+import os
import base64
import logging
import copy
@@ -26,7 +27,7 @@
from botocore.compat import (
unquote, json, six, unquote_str, ensure_bytes, get_md5,
OrderedDict, urlsplit, urlunsplit, XMLParseError,
- ETree,
+ ETree, quote,
)
from botocore.docs.utils import AutoPopulatedParam
from botocore.docs.utils import HideParamFromOperations
@@ -84,6 +85,15 @@ def handle_service_name_alias(service_name, **kwargs):
return SERVICE_NAME_ALIASES.get(service_name, service_name)
+def add_recursion_detection_header(params, **kwargs):
+ has_lambda_name = 'AWS_LAMBDA_FUNCTION_NAME' in os.environ
+ trace_id = os.environ.get('_X_AMZ_TRACE_ID')
+ if has_lambda_name and trace_id:
+ headers = params['headers']
+ if 'X-Amzn-Trace-Id' not in headers:
+ headers['X-Amzn-Trace-Id'] = quote(trace_id)
+
+
def escape_xml_payload(params, **kwargs):
# Replace \r and \n with the escaped sequence over the whole XML document
# to avoid linebreak normalization modifying customer input when the
@@ -986,6 +996,7 @@ def remove_lex_v2_start_conversation(class_attributes, **kwargs):
('docs.*.s3.CopyObject.complete-section', document_copy_source_form),
('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form),
+ ('before-call', add_recursion_detection_header),
('before-call.s3', add_expect_header),
('before-call.glacier', add_glacier_version),
('before-call.apigateway', add_accept_header),
| diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py
index 6d0eb6b41e..19b4fe3706 100644
--- a/tests/unit/test_handlers.py
+++ b/tests/unit/test_handlers.py
@@ -17,6 +17,7 @@
import copy
import os
import json
+import pytest
import botocore
import botocore.session
@@ -1368,3 +1369,24 @@ def test_does_validate_long_host(self):
def test_does_validate_host_with_illegal_char(self):
with self.assertRaises(ParamValidationError):
self._prepend_to_host('https://example.com/path', 'host#name')
+
+
+@pytest.mark.parametrize(
+ 'environ, header_before, header_after',
+ [
+ ({'AWS_LAMBDA_FUNCTION_NAME': 'foo'}, {}, {}),
+ ({'_X_AMZ_TRACE_ID': 'bar'}, {}, {}),
+ ({'AWS_LAMBDA_FUNCTION_NAME': 'foo', '_X_AMZ_TRACE_ID': 'bar'},
+ {}, {'X-Amzn-Trace-Id': 'bar'}),
+ ({'AWS_LAMBDA_FUNCTION_NAME': 'foo', '_X_AMZ_TRACE_ID': 'bar'},
+ {'X-Amzn-Trace-Id': 'fizz'}, {'X-Amzn-Trace-Id': 'fizz'}),
+ ({'AWS_LAMBDA_FUNCTION_NAME': 'foo',
+ '_X_AMZ_TRACE_ID': 'first\nsecond'},
+ {}, {'X-Amzn-Trace-Id': 'first%0Asecond'})
+ ]
+)
+def test_add_recursion_detection_header(environ, header_before, header_after):
+ request_dict = {'headers': header_before}
+ with mock.patch('os.environ', environ):
+ handlers.add_recursion_detection_header(request_dict)
+ assert request_dict['headers'] == header_after
| diff --git a/.changes/next-release/enhancement-RequestHeader-82118.json b/.changes/next-release/enhancement-RequestHeader-82118.json
new file mode 100644
index 0000000000..f6641fedaf
--- /dev/null
+++ b/.changes/next-release/enhancement-RequestHeader-82118.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "Lambda Request Header",
+ "description": "Adding request header for Lambda recursion detection."
+}
| [
{
"components": [
{
"doc": "",
"lines": [
88,
94
],
"name": "add_recursion_detection_header",
"signature": "def add_recursion_detection_header(params, **kwargs):",
"type": "function"
}
],
"file": "botocore/handlers.py"
}
] | [
"tests/unit/test_handlers.py::test_add_recursion_detection_header[environ0-header_before0-header_after0]",
"tests/unit/test_handlers.py::test_add_recursion_detection_header[environ1-header_before1-header_after1]",
"tests/unit/test_handlers.py::test_add_recursion_detection_header[environ2-header_before2-header_a... | [
"tests/unit/test_handlers.py::TestHandlers::test_200_response_with_no_error_left_untouched",
"tests/unit/test_handlers.py::TestHandlers::test_500_response_can_be_none",
"tests/unit/test_handlers.py::TestHandlers::test_500_status_code_set_for_200_response",
"tests/unit/test_handlers.py::TestHandlers::test_acce... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Adding recursion detection headers
Adding request headers for lambda recursion detection.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in botocore/handlers.py]
(definition of add_recursion_detection_header:)
def add_recursion_detection_header(params, **kwargs):
[end of new definitions in botocore/handlers.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5e4b564dd0f9aab16a404251ebd3e675c9681492 | |
matplotlib__matplotlib-22387 | 22,387 | matplotlib/matplotlib | 3.5 | 2e70254e860d7946035a3b0e50be9e08e78fd36e | 2022-02-03T22:25:29Z | diff --git a/doc/api/colors_api.rst b/doc/api/colors_api.rst
index 44f8cca303fd..970986ff4438 100644
--- a/doc/api/colors_api.rst
+++ b/doc/api/colors_api.rst
@@ -14,27 +14,44 @@
:no-members:
:no-inherited-members:
-Classes
--------
+Color norms
+-----------
.. autosummary::
:toctree: _as_gen/
:template: autosummary.rst
+ Normalize
+ NoNorm
AsinhNorm
BoundaryNorm
- Colormap
CenteredNorm
- LightSource
- LinearSegmentedColormap
- ListedColormap
+ FuncNorm
LogNorm
- NoNorm
- Normalize
PowerNorm
SymLogNorm
TwoSlopeNorm
- FuncNorm
+
+Colormaps
+---------
+
+.. autosummary::
+ :toctree: _as_gen/
+ :template: autosummary.rst
+
+ Colormap
+ LinearSegmentedColormap
+ ListedColormap
+
+Other classes
+-------------
+
+.. autosummary::
+ :toctree: _as_gen/
+ :template: autosummary.rst
+
+ ColorSequenceRegistry
+ LightSource
Functions
---------
diff --git a/doc/api/matplotlib_configuration_api.rst b/doc/api/matplotlib_configuration_api.rst
index 3636c45d0c71..c301149b8050 100644
--- a/doc/api/matplotlib_configuration_api.rst
+++ b/doc/api/matplotlib_configuration_api.rst
@@ -52,12 +52,15 @@ Logging
.. autofunction:: set_loglevel
-Colormaps
-=========
+Colormaps and color sequences
+=============================
.. autodata:: colormaps
:no-value:
+.. autodata:: color_sequences
+ :no-value:
+
Miscellaneous
=============
diff --git a/doc/api/pyplot_summary.rst b/doc/api/pyplot_summary.rst
index 8d18c8b67e3e..30454486f14a 100644
--- a/doc/api/pyplot_summary.rst
+++ b/doc/api/pyplot_summary.rst
@@ -31,3 +31,6 @@ For a more in-depth look at colormaps, see the
.. autodata:: colormaps
:no-value:
+
+.. autodata:: color_sequences
+ :no-value:
diff --git a/lib/matplotlib/__init__.py b/lib/matplotlib/__init__.py
index 2a495a91eb0d..7e8f6efa9af4 100644
--- a/lib/matplotlib/__init__.py
+++ b/lib/matplotlib/__init__.py
@@ -1455,3 +1455,4 @@ def inner(ax, *args, data=None, **kwargs):
# workaround: we must defer colormaps import to after loading rcParams, because
# colormap creation depends on rcParams
from matplotlib.cm import _colormaps as colormaps
+from matplotlib.colors import _color_sequences as color_sequences
diff --git a/lib/matplotlib/colors.py b/lib/matplotlib/colors.py
index 2e519149527f..ed051e304405 100644
--- a/lib/matplotlib/colors.py
+++ b/lib/matplotlib/colors.py
@@ -40,7 +40,7 @@
"""
import base64
-from collections.abc import Sized, Sequence
+from collections.abc import Sized, Sequence, Mapping
import copy
import functools
import importlib
@@ -54,7 +54,7 @@
import matplotlib as mpl
import numpy as np
-from matplotlib import _api, cbook, scale
+from matplotlib import _api, _cm, cbook, scale
from ._color_data import BASE_COLORS, TABLEAU_COLORS, CSS4_COLORS, XKCD_COLORS
@@ -94,6 +94,113 @@ def get_named_colors_mapping():
return _colors_full_map
+class ColorSequenceRegistry(Mapping):
+ r"""
+ Container for sequences of colors that are known to Matplotlib by name.
+
+ The universal registry instance is `matplotlib.color_sequences`. There
+ should be no need for users to instantiate `.ColorSequenceRegistry`
+ themselves.
+
+ Read access uses a dict-like interface mapping names to lists of colors::
+
+ import matplotlib as mpl
+ cmap = mpl.color_sequences['tab10']
+
+ The returned lists are copies, so that their modification does not change
+ the global definition of the color sequence.
+
+ Additional color sequences can be added via
+ `.ColorSequenceRegistry.register`::
+
+ mpl.color_sequences.register('rgb', ['r', 'g', 'b'])
+ """
+
+ _BUILTIN_COLOR_SEQUENCES = {
+ 'tab10': _cm._tab10_data,
+ 'tab20': _cm._tab20_data,
+ 'tab20b': _cm._tab20b_data,
+ 'tab20c': _cm._tab20c_data,
+ 'Pastel1': _cm._Pastel1_data,
+ 'Pastel2': _cm._Pastel2_data,
+ 'Paired': _cm._Paired_data,
+ 'Accent': _cm._Accent_data,
+ 'Dark2': _cm._Dark2_data,
+ 'Set1': _cm._Set1_data,
+ 'Set2': _cm._Set1_data,
+ 'Set3': _cm._Set1_data,
+ }
+
+ def __init__(self):
+ self._color_sequences = {**self._BUILTIN_COLOR_SEQUENCES}
+
+ def __getitem__(self, item):
+ try:
+ return list(self._color_sequences[item])
+ except KeyError:
+ raise KeyError(f"{item!r} is not a known color sequence name")
+
+ def __iter__(self):
+ return iter(self._color_sequences)
+
+ def __len__(self):
+ return len(self._color_sequences)
+
+ def __str__(self):
+ return ('ColorSequenceRegistry; available colormaps:\n' +
+ ', '.join(f"'{name}'" for name in self))
+
+ def register(self, name, color_list):
+ """
+ Register a new color sequence.
+
+ The color sequence registry stores a copy of the given *color_list*, so
+ that future changes to the original list do not affect the registered
+ color sequence. Think of this as the registry taking a snapshot
+ of *color_list* at registration.
+
+ Parameters
+ ----------
+ name : str
+ The name for the color sequence.
+
+ color_list : list of colors
+ An iterable returning valid Matplotlib colors when iterating over.
+ Note however that the returned color sequence will always be a
+ list regardless of the input type.
+
+ """
+ if name in self._BUILTIN_COLOR_SEQUENCES:
+ raise ValueError(f"{name!r} is a reserved name for a builtin "
+ "color sequence")
+
+ color_list = list(color_list) # force copy and coerce type to list
+ for color in color_list:
+ try:
+ to_rgba(color)
+ except ValueError:
+ raise ValueError(
+ f"{color!r} is not a valid color specification")
+
+ self._color_sequences[name] = color_list
+
+ def unregister(self, name):
+ """
+ Remove a sequence from the registry.
+
+ You cannot remove built-in color sequences.
+
+ If the name is not registered, returns with no error.
+ """
+ if name in self._BUILTIN_COLOR_SEQUENCES:
+ raise ValueError(
+ f"Cannot unregister builtin color sequence {name!r}")
+ self._color_sequences.pop(name, None)
+
+
+_color_sequences = ColorSequenceRegistry()
+
+
def _sanitize_extrema(ex):
if ex is None:
return ex
diff --git a/lib/matplotlib/pyplot.py b/lib/matplotlib/pyplot.py
index 577956bedce6..a06daab90a7d 100644
--- a/lib/matplotlib/pyplot.py
+++ b/lib/matplotlib/pyplot.py
@@ -71,6 +71,7 @@
from matplotlib import cm
from matplotlib.cm import _colormaps as colormaps, get_cmap, register_cmap
+from matplotlib.colors import _color_sequences as color_sequences
import numpy as np
| diff --git a/lib/matplotlib/tests/test_colors.py b/lib/matplotlib/tests/test_colors.py
index 80f8b663bdce..c9aed221108e 100644
--- a/lib/matplotlib/tests/test_colors.py
+++ b/lib/matplotlib/tests/test_colors.py
@@ -1510,3 +1510,43 @@ def test_make_norm_from_scale_name():
logitnorm = mcolors.make_norm_from_scale(
mscale.LogitScale, mcolors.Normalize)
assert logitnorm.__name__ == logitnorm.__qualname__ == "LogitScaleNorm"
+
+
+def test_color_sequences():
+ # basic access
+ assert plt.color_sequences is matplotlib.color_sequences # same registry
+ assert list(plt.color_sequences) == [
+ 'tab10', 'tab20', 'tab20b', 'tab20c', 'Pastel1', 'Pastel2', 'Paired',
+ 'Accent', 'Dark2', 'Set1', 'Set2', 'Set3']
+ assert len(plt.color_sequences['tab10']) == 10
+ assert len(plt.color_sequences['tab20']) == 20
+
+ tab_colors = [
+ 'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',
+ 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
+ for seq_color, tab_color in zip(plt.color_sequences['tab10'], tab_colors):
+ assert mcolors.same_color(seq_color, tab_color)
+
+ # registering
+ with pytest.raises(ValueError, match="reserved name"):
+ plt.color_sequences.register('tab10', ['r', 'g', 'b'])
+ with pytest.raises(ValueError, match="not a valid color specification"):
+ plt.color_sequences.register('invalid', ['not a color'])
+
+ rgb_colors = ['r', 'g', 'b']
+ plt.color_sequences.register('rgb', rgb_colors)
+ assert plt.color_sequences['rgb'] == ['r', 'g', 'b']
+ # should not affect the registered sequence because input is copied
+ rgb_colors.append('c')
+ assert plt.color_sequences['rgb'] == ['r', 'g', 'b']
+ # should not affect the registered sequence because returned list is a copy
+ plt.color_sequences['rgb'].append('c')
+ assert plt.color_sequences['rgb'] == ['r', 'g', 'b']
+
+ # unregister
+ plt.color_sequences.unregister('rgb')
+ with pytest.raises(KeyError):
+ plt.color_sequences['rgb'] # rgb is gone
+ plt.color_sequences.unregister('rgb') # multiple unregisters are ok
+ with pytest.raises(ValueError, match="Cannot unregister builtin"):
+ plt.color_sequences.unregister('tab10')
| diff --git a/doc/api/colors_api.rst b/doc/api/colors_api.rst
index 44f8cca303fd..970986ff4438 100644
--- a/doc/api/colors_api.rst
+++ b/doc/api/colors_api.rst
@@ -14,27 +14,44 @@
:no-members:
:no-inherited-members:
-Classes
--------
+Color norms
+-----------
.. autosummary::
:toctree: _as_gen/
:template: autosummary.rst
+ Normalize
+ NoNorm
AsinhNorm
BoundaryNorm
- Colormap
CenteredNorm
- LightSource
- LinearSegmentedColormap
- ListedColormap
+ FuncNorm
LogNorm
- NoNorm
- Normalize
PowerNorm
SymLogNorm
TwoSlopeNorm
- FuncNorm
+
+Colormaps
+---------
+
+.. autosummary::
+ :toctree: _as_gen/
+ :template: autosummary.rst
+
+ Colormap
+ LinearSegmentedColormap
+ ListedColormap
+
+Other classes
+-------------
+
+.. autosummary::
+ :toctree: _as_gen/
+ :template: autosummary.rst
+
+ ColorSequenceRegistry
+ LightSource
Functions
---------
diff --git a/doc/api/matplotlib_configuration_api.rst b/doc/api/matplotlib_configuration_api.rst
index 3636c45d0c71..c301149b8050 100644
--- a/doc/api/matplotlib_configuration_api.rst
+++ b/doc/api/matplotlib_configuration_api.rst
@@ -52,12 +52,15 @@ Logging
.. autofunction:: set_loglevel
-Colormaps
-=========
+Colormaps and color sequences
+=============================
.. autodata:: colormaps
:no-value:
+.. autodata:: color_sequences
+ :no-value:
+
Miscellaneous
=============
diff --git a/doc/api/pyplot_summary.rst b/doc/api/pyplot_summary.rst
index 8d18c8b67e3e..30454486f14a 100644
--- a/doc/api/pyplot_summary.rst
+++ b/doc/api/pyplot_summary.rst
@@ -31,3 +31,6 @@ For a more in-depth look at colormaps, see the
.. autodata:: colormaps
:no-value:
+
+.. autodata:: color_sequences
+ :no-value:
| [
{
"components": [
{
"doc": "Container for sequences of colors that are known to Matplotlib by name.\n\nThe universal registry instance is `matplotlib.color_sequences`. There\nshould be no need for users to instantiate `.ColorSequenceRegistry`\nthemselves.\n\nRead access uses a dict-like interface ... | [
"lib/matplotlib/tests/test_colors.py::test_color_sequences"
] | [
"lib/matplotlib/tests/test_colors.py::test_create_lookup_table[5-result0]",
"lib/matplotlib/tests/test_colors.py::test_create_lookup_table[2-result1]",
"lib/matplotlib/tests/test_colors.py::test_create_lookup_table[1-result2]",
"lib/matplotlib/tests/test_colors.py::test_resample",
"lib/matplotlib/tests/test... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add a registry for color sequences
Color sequences are simply lists of colors, that we store by name in
a registry. The registry is modelled similar to the ColormapRegistry
to 1) support immutable builtin color sequences and 2) to return copies
so that one cannot mess with the global definition of the color sequence
through an obtained instance.
Note that some details of `ColormapRegistry` are different and we need
different docstrings so that it's easier to have a separate class and not try
to factor out some common aspects in a base class.
For now, I've made the sequences used for `ListedColormap`s available
as builtin sequences, but that's open for discussion.
More usage documentation should be added in the color examples and/or
tutorials, but I'll wait with that till after the general approval of
the structure and API. One common use case will be
```
plt.rc_params['axes.prop_cycle'] = plt.cycler(color=plt.color_sequences['Pastel1')
```
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in lib/matplotlib/colors.py]
(definition of ColorSequenceRegistry:)
class ColorSequenceRegistry(Mapping):
"""Container for sequences of colors that are known to Matplotlib by name.
The universal registry instance is `matplotlib.color_sequences`. There
should be no need for users to instantiate `.ColorSequenceRegistry`
themselves.
Read access uses a dict-like interface mapping names to lists of colors::
import matplotlib as mpl
cmap = mpl.color_sequences['tab10']
The returned lists are copies, so that their modification does not change
the global definition of the color sequence.
Additional color sequences can be added via
`.ColorSequenceRegistry.register`::
mpl.color_sequences.register('rgb', ['r', 'g', 'b'])"""
(definition of ColorSequenceRegistry.__init__:)
def __init__(self):
(definition of ColorSequenceRegistry.__getitem__:)
def __getitem__(self, item):
(definition of ColorSequenceRegistry.__iter__:)
def __iter__(self):
(definition of ColorSequenceRegistry.__len__:)
def __len__(self):
(definition of ColorSequenceRegistry.__str__:)
def __str__(self):
(definition of ColorSequenceRegistry.register:)
def register(self, name, color_list):
"""Register a new color sequence.
The color sequence registry stores a copy of the given *color_list*, so
that future changes to the original list do not affect the registered
color sequence. Think of this as the registry taking a snapshot
of *color_list* at registration.
Parameters
----------
name : str
The name for the color sequence.
color_list : list of colors
An iterable returning valid Matplotlib colors when iterating over.
Note however that the returned color sequence will always be a
list regardless of the input type."""
(definition of ColorSequenceRegistry.unregister:)
def unregister(self, name):
"""Remove a sequence from the registry.
You cannot remove built-in color sequences.
If the name is not registered, returns with no error."""
[end of new definitions in lib/matplotlib/colors.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3d6c3da884fafae4654df68144391cfe9be6f134 | |
conan-io__conan-10507 | 10,507 | conan-io/conan | null | a2e5d1279748956b3878c8de415bf204043a09cd | 2022-02-03T17:46:33Z | diff --git a/conans/cli/api/subapi/install.py b/conans/cli/api/subapi/install.py
index b04b9be0d58..9edf7a15899 100644
--- a/conans/cli/api/subapi/install.py
+++ b/conans/cli/api/subapi/install.py
@@ -1,9 +1,13 @@
+import os
+
from conan import ConanFile
from conans.cli.api.subapi import api_method
from conans.cli.conan_app import ConanApp
from conans.client.generators import write_generators
-
from conans.client.installer import BinaryInstaller, call_system_requirements
+from conans.client.loader import load_python_file
+from conans.errors import ConanException
+from conans.util.files import rmdir
class InstallAPI:
@@ -25,8 +29,8 @@ def install_binaries(self, deps_graph, remotes=None, update=False):
installer.install(deps_graph)
# TODO: Look for a better name
- @staticmethod
- def install_consumer(deps_graph, generators=None, source_folder=None, output_folder=None):
+ def install_consumer(self, deps_graph, generators=None, source_folder=None, output_folder=None,
+ deploy=False):
""" Once a dependency graph has been installed, there are things to be done, like invoking
generators for the root consumer.
This is necessary for example for conanfile.txt/py, or for "conan install <ref> -g
@@ -38,9 +42,97 @@ def install_consumer(deps_graph, generators=None, source_folder=None, output_fol
conanfile.folders.set_base_generators(output_folder)
conanfile.folders.set_base_build(output_folder)
+ _do_deploys(self.conan_api, deps_graph, deploy, output_folder)
+
# Add cli -g generators
conanfile.generators = list(set(conanfile.generators).union(generators or []))
write_generators(conanfile)
if type(conanfile).system_requirements != ConanFile.system_requirements:
call_system_requirements(conanfile)
+
+
+# TODO: Look for a better location for the deployers code
+def _find_deployer(d, cache_deploy_folder):
+ """ implements the logic of finding a deployer, with priority:
+ - 1) absolute paths
+ - 2) relative to cwd
+ - 3) in the cache/extensions/deploy folder
+ - 4) built-in
+ """
+ def _load(path):
+ mod, _ = load_python_file(path)
+ return mod.deploy
+
+ if not d.endswith(".py"):
+ d += ".py" # Deployers must be python files
+ if os.path.isabs(d):
+ return _load(d)
+ cwd = os.getcwd()
+ local_path = os.path.normpath(os.path.join(cwd, d))
+ if os.path.isfile(local_path):
+ return _load(local_path)
+ cache_path = os.path.join(cache_deploy_folder, d)
+ if os.path.isfile(cache_path):
+ return _load(cache_path)
+ builtin_deploy = {"full_deploy.py": full_deploy,
+ "direct_deploy.py": direct_deploy}.get(d)
+ if builtin_deploy is not None:
+ return builtin_deploy
+ raise ConanException(f"Cannot find deployer '{d}'")
+
+
+def _do_deploys(conan_api, graph, deploy, output_folder):
+ # Handle the deploys
+ cache_deploy_folder = os.path.join(conan_api.cache_folder, "extensions", "deploy")
+ for d in deploy or []:
+ deployer = _find_deployer(d, cache_deploy_folder)
+ # IMPORTANT: Use always kwargs to not break if it changes in the future
+ conanfile = graph.root.conanfile
+ deployer(conanfile=conanfile, output_folder=output_folder)
+
+
+def full_deploy(conanfile, output_folder):
+ """
+ Deploys to output_folder + host/dep/0.1/Release/x86_64 subfolder
+ """
+ # TODO: This deployer needs to be put somewhere else
+ # TODO: Document that this will NOT work with editables
+ import os
+ import shutil
+
+ conanfile.output.info(f"Conan built-in full deployer to {output_folder}")
+ for dep in conanfile.dependencies.values():
+ folder_name = os.path.join(dep.context, dep.ref.name, str(dep.ref.version))
+ build_type = str(dep.info.settings.build_type)
+ arch = str(dep.info.settings.arch)
+ if build_type:
+ folder_name = os.path.join(folder_name, build_type)
+ if arch:
+ folder_name = os.path.join(folder_name, arch)
+ new_folder = os.path.join(output_folder, folder_name)
+ if os.path.isdir(new_folder):
+ rmdir(new_folder)
+ shutil.copytree(dep.package_folder, new_folder)
+ dep.set_deploy_folder(new_folder)
+
+
+def direct_deploy(conanfile, output_folder):
+ """
+ Deploys to output_folder a single package,
+ """
+ # TODO: This deployer needs to be put somewhere else
+ # TODO: Document that this will NOT work with editables
+ import os
+ import shutil
+
+ conanfile.output.info(f"Conan built-in pkg deployer to {output_folder}")
+ # If the argument is --reference, the current conanfile is a virtual one with 1 single
+ # dependency, the "reference" package. If the argument is a local path, then all direct
+ # dependencies
+ for dep in conanfile.dependencies.filter({"direct": True}).values():
+ new_folder = os.path.join(output_folder, dep.ref.name)
+ if os.path.isdir(new_folder):
+ rmdir(new_folder)
+ shutil.copytree(dep.package_folder, new_folder)
+ dep.set_deploy_folder(new_folder)
diff --git a/conans/cli/commands/build.py b/conans/cli/commands/build.py
index 64c69666d9b..82a0e6ec5df 100644
--- a/conans/cli/commands/build.py
+++ b/conans/cli/commands/build.py
@@ -47,7 +47,7 @@ def build(conan_api, parser, *args):
source_folder = make_abs_path(args.source_folder, cwd) if args.source_folder else folder
output_folder = make_abs_path(args.output_folder, cwd) if args.output_folder else folder
- out.highlight("\n-------- Finalizing install (imports, deploy, generators) ----------")
+ out.highlight("\n-------- Finalizing install (deploy, generators) ----------")
conan_api.install.install_consumer(deps_graph=deps_graph, source_folder=source_folder,
output_folder=output_folder)
diff --git a/conans/cli/commands/install.py b/conans/cli/commands/install.py
index 672c5426465..795f2e28c9f 100644
--- a/conans/cli/commands/install.py
+++ b/conans/cli/commands/install.py
@@ -1,10 +1,10 @@
import os
-from conans.cli.commands import make_abs_path
-from conans.cli.formatters.graph import print_graph_basic, print_graph_packages
from conans.cli.command import conan_command, Extender, COMMAND_GROUPS, OnceArgument
+from conans.cli.commands import make_abs_path
from conans.cli.common import _add_common_install_arguments, _help_build_policies, \
get_profiles_from_args, get_lockfile, get_multiple_remotes
+from conans.cli.formatters.graph import print_graph_basic, print_graph_packages
from conans.cli.output import ConanOutput
from conans.errors import ConanException
from conans.model.recipe_ref import RecipeReference
@@ -143,7 +143,8 @@ def install(conan_api, parser, *args):
parser.add_argument("-of", "--output-folder",
help='The root output folder for generated and build files')
parser.add_argument("-sf", "--source-folder", help='The root source folder')
-
+ parser.add_argument("--deploy", action=Extender,
+ help='Deploy using the provided deployer to the output folder')
args = parser.parse_args(*args)
# parameter validation
@@ -161,7 +162,6 @@ def install(conan_api, parser, *args):
source_folder = make_abs_path(args.source_folder, cwd)
if args.output_folder:
output_folder = make_abs_path(args.output_folder, cwd)
- deploy = True if args.reference else False
remote = get_multiple_remotes(conan_api, args.remote)
@@ -170,11 +170,15 @@ def install(conan_api, parser, *args):
out = ConanOutput()
out.highlight("\n-------- Installing packages ----------")
conan_api.install.install_binaries(deps_graph=deps_graph, remotes=remote, update=args.update)
- out.highlight("\n-------- Finalizing install (generators) ----------")
+
+ out.highlight("\n-------- Finalizing install (deploy, generators) ----------")
conan_api.install.install_consumer(deps_graph=deps_graph,
generators=args.generator,
source_folder=source_folder,
- output_folder=output_folder)
+ output_folder=output_folder,
+ deploy=args.deploy
+ )
+
if args.lockfile_out:
lockfile_out = make_abs_path(args.lockfile_out, cwd)
out.info(f"Saving lockfile: {lockfile_out}")
diff --git a/conans/model/build_info.py b/conans/model/build_info.py
index ac412a6a7ea..1188f16d1ee 100644
--- a/conans/model/build_info.py
+++ b/conans/model/build_info.py
@@ -363,6 +363,19 @@ def set_relative_base_folder(self, folder):
updates[prop_name] = os.path.join(folder, value)
component._generator_properties.update(updates)
+ def deploy_base_folder(self, package_folder, deploy_folder):
+ """Prepend the folder to all the directories"""
+ for component in self.components.values():
+ for varname in _DIRS_VAR_NAMES:
+ origin = getattr(component, varname)
+ if origin is not None:
+ new_ = []
+ for el in origin:
+ rel_path = os.path.relpath(el, package_folder)
+ new_.append(os.path.join(deploy_folder, rel_path))
+ origin[:] = new_
+ # TODO: Missing properties
+
def get_sorted_components(self):
"""Order the components taking into account if they depend on another component in the
same package (not scoped with ::). First less dependant
diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py
index ff3ad9e262a..84464e0411a 100644
--- a/conans/model/conan_file.py
+++ b/conans/model/conan_file.py
@@ -237,3 +237,7 @@ def test(self):
def __repr__(self):
return self.display_name
+
+ def set_deploy_folder(self, deploy_folder):
+ self.cpp_info.deploy_base_folder(self.package_folder, deploy_folder)
+ self.folders.set_base_package(deploy_folder)
diff --git a/conans/model/conanfile_interface.py b/conans/model/conanfile_interface.py
index a75613c9cce..347c7b52322 100644
--- a/conans/model/conanfile_interface.py
+++ b/conans/model/conanfile_interface.py
@@ -85,3 +85,10 @@ def is_build_context(self):
@property
def package_type(self):
return self._conanfile.package_type
+
+ @property
+ def info(self):
+ return self._conanfile.info
+
+ def set_deploy_folder(self, deploy_folder):
+ self._conanfile.set_deploy_folder(deploy_folder)
| diff --git a/conans/test/functional/command/test_install_deploy.py b/conans/test/functional/command/test_install_deploy.py
new file mode 100644
index 00000000000..6837ef36461
--- /dev/null
+++ b/conans/test/functional/command/test_install_deploy.py
@@ -0,0 +1,189 @@
+import os
+import textwrap
+
+import pytest
+
+from conans.test.assets.cmake import gen_cmakelists
+from conans.test.assets.genconanfile import GenConanfile
+from conans.test.assets.sources import gen_function_cpp
+from conans.test.utils.tools import TestClient
+from conans.util.files import save
+
+
+@pytest.mark.tool("cmake")
+def test_install_deploy():
+ c = TestClient()
+ c.run("new cmake_lib -d name=hello -d version=0.1")
+ c.run("create .")
+ cmake = gen_cmakelists(appname="my_app", appsources=["main.cpp"], find_package=["hello"])
+ deploy = textwrap.dedent("""
+ import os, shutil
+
+ # USE **KWARGS to be robust against changes
+ def deploy(conanfile, output_folder, **kwargs):
+ for r, d in conanfile.dependencies.items():
+ new_folder = os.path.join(output_folder, d.ref.name)
+ shutil.copytree(d.package_folder, new_folder)
+ d.set_deploy_folder(new_folder)
+ """)
+ c.save({"conanfile.txt": "[requires]\nhello/0.1",
+ "deploy.py": deploy,
+ "CMakeLists.txt": cmake,
+ "main.cpp": gen_function_cpp(name="main", includes=["hello"], calls=["hello"])},
+ clean_first=True)
+ c.run("install . --deploy=deploy.py -of=mydeploy -g CMakeToolchain -g CMakeDeps")
+ c.run("remove * -f") # Make sure the cache is clean, no deps there
+ cwd = c.current_folder.replace("\\", "/")
+ deps = c.load("mydeploy/hello-release-x86_64-data.cmake")
+ assert f'set(hello_PACKAGE_FOLDER_RELEASE "{cwd}/mydeploy/hello")' in deps
+ assert 'set(hello_INCLUDE_DIRS_RELEASE "${hello_PACKAGE_FOLDER_RELEASE}/include")' in deps
+ assert 'set(hello_LIB_DIRS_RELEASE "${hello_PACKAGE_FOLDER_RELEASE}/lib")' in deps
+
+ # I can totally build without errors with deployed
+ c.run_command("cmake . -DCMAKE_TOOLCHAIN_FILE=mydeploy/conan_toolchain.cmake")
+ c.run_command("cmake --build . --config Release")
+
+
+def test_multi_deploy():
+ """ check that we can add more than 1 deployer in the command line, both in local folders
+ and in cache.
+ Also testing that using .py extension or not, is the same
+ Also, the local folder have precedence over the cache extensions
+ """
+ c = TestClient()
+ deploy1 = textwrap.dedent("""
+ def deploy(conanfile, output_folder, **kwargs):
+ conanfile.output.info("deploy1!!")
+ """)
+ deploy2 = textwrap.dedent("""
+ def deploy(conanfile, output_folder, **kwargs):
+ conanfile.output.info("sub/deploy2!!")
+ """)
+ deploy_cache = textwrap.dedent("""
+ def deploy(conanfile, output_folder, **kwargs):
+ conanfile.output.info("deploy cache!!")
+ """)
+ save(os.path.join(c.cache_folder, "extensions", "deploy", "deploy_cache.py"), deploy_cache)
+ # This should never be called in this test, always the local is found first
+ save(os.path.join(c.cache_folder, "extensions", "deploy", "mydeploy.py"), "CRASH!!!!")
+ c.save({"conanfile.txt": "",
+ "mydeploy.py": deploy1,
+ "sub/mydeploy2.py": deploy2})
+
+ c.run("install . --deploy=mydeploy --deploy=sub/mydeploy2 --deploy=deploy_cache")
+ assert "conanfile.txt: deploy1!!" in c.out
+ assert "conanfile.txt: sub/deploy2!!" in c.out
+ assert "conanfile.txt: deploy cache!!" in c.out
+
+ # Now with .py extension
+ c.run("install . --deploy=mydeploy.py --deploy=sub/mydeploy2.py --deploy=deploy_cache.py")
+ assert "conanfile.txt: deploy1!!" in c.out
+ assert "conanfile.txt: sub/deploy2!!" in c.out
+ assert "conanfile.txt: deploy cache!!" in c.out
+
+
+def test_builtin_deploy():
+ """ check the built-in full_deploy
+ """
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.files import save
+ class Pkg(ConanFile):
+ settings = "arch", "build_type"
+ def package(self):
+ content = f"{self.settings.build_type}-{self.settings.arch}"
+ save(self, os.path.join(self.package_folder, "include/hello.h"), content)
+ """)
+ c.save({"conanfile.py": conanfile})
+ c.run("create . --name=dep --version=0.1")
+ c.run("create . --name=dep --version=0.1 -s build_type=Debug -s arch=x86")
+ c.save({"conanfile.txt": "[requires]\ndep/0.1"}, clean_first=True)
+ c.run("install . --deploy=full_deploy -of=output -g CMakeDeps")
+ assert "Conan built-in full deployer" in c.out
+ c.run("install . --deploy=full_deploy -of=output -g CMakeDeps "
+ "-s build_type=Debug -s arch=x86")
+ release = c.load("output/host/dep/0.1/Release/x86_64/include/hello.h")
+ assert "Release-x86_64" in release
+ debug = c.load("output/host/dep/0.1/Debug/x86/include/hello.h")
+ assert "Debug-x86" in debug
+ cmake_release = c.load("output/dep-release-x86_64-data.cmake")
+ assert 'set(dep_INCLUDE_DIRS_RELEASE "${dep_PACKAGE_FOLDER_RELEASE}/include")' in cmake_release
+ assert "output/host/dep/0.1/Release/x86_64" in cmake_release
+ cmake_debug = c.load("output/dep-debug-x86-data.cmake")
+ assert 'set(dep_INCLUDE_DIRS_DEBUG "${dep_PACKAGE_FOLDER_DEBUG}/include")' in cmake_debug
+ assert "output/host/dep/0.1/Debug/x86" in cmake_debug
+
+
+def test_deploy_reference():
+ """ check that we can also deploy a reference
+ """
+ c = TestClient()
+ c.save({"conanfile.py": GenConanfile("pkg", "1.0").with_package_file("include/hi.h", "hi")})
+ c.run("create .")
+
+ c.run("install --reference=pkg/1.0 --deploy=full_deploy --output-folder=output")
+ # NOTE: Full deployer always use build_type/arch, even if None/None in the path, same structure
+ header = c.load("output/host/pkg/1.0/None/None/include/hi.h")
+ assert "hi" in header
+
+ # Testing that we can deploy to the current folder too
+ c.save({}, clean_first=True)
+ c.run("install --reference=pkg/1.0 --deploy=full_deploy")
+ # NOTE: Full deployer always use build_type/arch, even if None/None in the path, same structure
+ header = c.load("host/pkg/1.0/None/None/include/hi.h")
+ assert "hi" in header
+
+
+def test_deploy_overwrite():
+ """ calling several times the install --deploy doesn't crash if files already exist
+ """
+ c = TestClient()
+ c.save({"conanfile.py": GenConanfile("pkg", "1.0").with_package_file("include/hi.h", "hi")})
+ c.run("create .")
+
+ c.run("install --reference=pkg/1.0 --deploy=full_deploy --output-folder=output")
+ header = c.load("output/host/pkg/1.0/None/None/include/hi.h")
+ assert "hi" in header
+
+ # modify the package
+ c.save({"conanfile.py": GenConanfile("pkg", "1.0").with_package_file("include/hi.h", "bye")})
+ c.run("create .")
+ c.run("install --reference=pkg/1.0 --deploy=full_deploy --output-folder=output")
+ header = c.load("output/host/pkg/1.0/None/None/include/hi.h")
+ assert "bye" in header
+
+
+def test_deploy_editable():
+ """ when deploying something that is editable, with the full_deploy built-in, it will copy the
+ editable files as-is, but it doesn't fail at this moment
+ """
+
+ c = TestClient()
+ c.save({"conanfile.py": GenConanfile("pkg", "1.0"),
+ "src/include/hi.h": "hi"})
+ c.run("editable add . pkg/1.0")
+
+ c.run("install --reference=pkg/1.0 --deploy=full_deploy --output-folder=output")
+ header = c.load("output/host/pkg/1.0/None/None/src/include/hi.h")
+ assert "hi" in header
+
+
+def test_deploy_single_package():
+ """ Lets try a deploy that executes on a single package reference
+ """
+ c = TestClient()
+ c.save({"conanfile.py": GenConanfile("pkg", "1.0").with_package_file("include/hi.h", "hi"),
+ "consumer/conanfile.txt": "[requires]\npkg/1.0"})
+ c.run("create .")
+
+ # if we deploy one --reference, we get that package
+ c.run("install --reference=pkg/1.0 --deploy=direct_deploy --output-folder=output")
+ header = c.load("output/pkg/include/hi.h")
+ assert "hi" in header
+
+ # If we deploy a local conanfile.txt, we get deployed its direct dependencies
+ c.run("install consumer/conanfile.txt --deploy=direct_deploy --output-folder=output2")
+ header = c.load("output2/pkg/include/hi.h")
+ assert "hi" in header
| [
{
"components": [
{
"doc": "implements the logic of finding a deployer, with priority:\n- 1) absolute paths\n- 2) relative to cwd\n- 3) in the cache/extensions/deploy folder\n- 4) built-in",
"lines": [
56,
82
],
"name": "_find_deployer",
"signatu... | [
"conans/test/functional/command/test_install_deploy.py::test_multi_deploy",
"conans/test/functional/command/test_install_deploy.py::test_builtin_deploy",
"conans/test/functional/command/test_install_deploy.py::test_deploy_reference",
"conans/test/functional/command/test_install_deploy.py::test_deploy_overwrit... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
New Conan 2.0 deployers
Close https://github.com/conan-io/conan/issues/10506
New approach for deploy of files:
- ``conan install --deploy=mydeploy`` will search for a ``mydeploy.py`` in this order:
- First in local folder. ``--deploy=folder/mydeploy`` is valid
- Then in local cache ``extensions/deploy`` folder
- The .py extension can be added to command line too
- ``conan install --deploy=conan_xxxx`` is for built-in deployers. ``conan_full_deploy`` one is proposed
- More than 1 deployer can be specified
- Deployers always run before generators, and only in the consumer side
- Deployers can change the folder, so generators that execute after will point to the deployed folder, not the cache
- Deployers might be challenging to code that take into account packages in editable mode. Initially deployers will not work with editables.
-
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conans/cli/api/subapi/install.py]
(definition of _find_deployer:)
def _find_deployer(d, cache_deploy_folder):
"""implements the logic of finding a deployer, with priority:
- 1) absolute paths
- 2) relative to cwd
- 3) in the cache/extensions/deploy folder
- 4) built-in"""
(definition of _find_deployer._load:)
def _load(path):
(definition of _do_deploys:)
def _do_deploys(conan_api, graph, deploy, output_folder):
(definition of full_deploy:)
def full_deploy(conanfile, output_folder):
"""Deploys to output_folder + host/dep/0.1/Release/x86_64 subfolder"""
(definition of direct_deploy:)
def direct_deploy(conanfile, output_folder):
"""Deploys to output_folder a single package,"""
[end of new definitions in conans/cli/api/subapi/install.py]
[start of new definitions in conans/model/build_info.py]
(definition of CppInfo.deploy_base_folder:)
def deploy_base_folder(self, package_folder, deploy_folder):
"""Prepend the folder to all the directories"""
[end of new definitions in conans/model/build_info.py]
[start of new definitions in conans/model/conan_file.py]
(definition of ConanFile.set_deploy_folder:)
def set_deploy_folder(self, deploy_folder):
[end of new definitions in conans/model/conan_file.py]
[start of new definitions in conans/model/conanfile_interface.py]
(definition of ConanFileInterface.info:)
def info(self):
(definition of ConanFileInterface.set_deploy_folder:)
def set_deploy_folder(self, deploy_folder):
[end of new definitions in conans/model/conanfile_interface.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
sqlfluff__sqlfluff-2540 | 2,540 | sqlfluff/sqlfluff | 0.9 | ef7d86548757fcbc3405ac421c38acefa55db9aa | 2022-02-01T20:18:14Z | diff --git a/src/sqlfluff/core/default_config.cfg b/src/sqlfluff/core/default_config.cfg
index 2775cba87c9..8a9c5c91960 100644
--- a/src/sqlfluff/core/default_config.cfg
+++ b/src/sqlfluff/core/default_config.cfg
@@ -143,3 +143,7 @@ unquoted_identifiers_policy = all
quoted_identifiers_policy = all
allow_space_in_identifier = False
additional_allowed_characters = ""
+
+[sqlfluff:rules:L062]
+# Comma separated list of blocked words that should not be used
+blocked_words = None
diff --git a/src/sqlfluff/core/rules/config_info.py b/src/sqlfluff/core/rules/config_info.py
index dc1a33da948..dd70b57c734 100644
--- a/src/sqlfluff/core/rules/config_info.py
+++ b/src/sqlfluff/core/rules/config_info.py
@@ -137,6 +137,12 @@
"in addition to alphanumerics (A-Z, a-z, 0-9) and underscores."
),
},
+ "blocked_words": {
+ "definition": (
+ "Optional comma-separated list of blocked words which should not be used "
+ "in statements."
+ ),
+ },
}
diff --git a/src/sqlfluff/rules/L062.py b/src/sqlfluff/rules/L062.py
new file mode 100644
index 00000000000..5921f15c45b
--- /dev/null
+++ b/src/sqlfluff/rules/L062.py
@@ -0,0 +1,91 @@
+"""Implementation of Rule L062."""
+
+from typing import Optional
+
+from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
+from sqlfluff.core.rules.doc_decorators import document_configuration
+
+
+@document_configuration
+class Rule_L062(BaseRule):
+ """Block a list of configurable words from being used.
+
+ This generic rule can be useful to prevent certain keywords, functions, or objects
+ from being used. Only whole words can be blocked, not phrases, nor parts of words.
+
+ This block list is case insensitive.
+
+ Example use cases:
+
+ * We prefer ``BOOL`` over ``BOOLEAN`` and there is no existing rule to enforce
+ this. Until such a rule is written, we can add ``BOOLEAN`` to the deny list
+ to cause a linting error to flag this.
+ * We have deprecated a schema/table/function and want to prevent it being used
+ in future. We can add that to the denylist and then add a ``-- noqa: L062`` for
+ the few exceptions that still need to be in the code base for now.
+
+ | **Anti-pattern**
+ | If the ``blocked_words`` config is set to ``deprecated_table,bool`` then the
+ | following will flag:
+
+ .. code-block:: sql
+
+ SELECT * FROM deprecated_table WHERE 1 = 1;
+ CREATE TABLE myschema.t1 (a BOOL);
+
+ | **Best practice**
+ | Do not used any blocked words:
+
+ .. code-block:: sql
+
+ SELECT * FROM another_table WHERE 1 = 1;
+ CREATE TABLE myschema.t1 (a BOOLEAN);
+
+ """
+
+ config_keywords = [
+ "blocked_words",
+ ]
+
+ def _eval(self, context: RuleContext) -> Optional[LintResult]:
+ # Config type hints
+ self.blocked_words: Optional[str]
+
+ # Exit early if no block list set
+ if not self.blocked_words:
+ return None
+
+ # Get the ignore list configuration and cache it
+ try:
+ blocked_words_list = self.blocked_words_list
+ except AttributeError:
+ # First-time only, read the settings from configuration.
+ # So we can cache them for next time for speed.
+ blocked_words_list = self._init_blocked_words()
+
+ # Only look at child elements
+ # Note: we do not need to ignore comments or meta types
+ # or the like as they will not have single word raws
+ if context.segment.segments:
+ return None
+
+ if context.segment.raw_upper in blocked_words_list:
+ return LintResult(
+ anchor=context.segment,
+ description=f"Use of blocked word '{context.segment.raw}'.",
+ )
+
+ return None
+
+ def _init_blocked_words(self):
+ """Called first time rule is evaluated to fetch & cache the blocked_words."""
+ blocked_words_config = getattr(self, "blocked_words")
+ if blocked_words_config:
+ self.blocked_words_list = self.split_comma_separated_string(
+ blocked_words_config.upper()
+ )
+ else: # pragma: no cover
+ # Shouldn't get here as we exit early if no block list
+ self.blocked_words_list = []
+
+ return self.blocked_words_list
| diff --git a/test/rules/std_L062_test.py b/test/rules/std_L062_test.py
new file mode 100644
index 00000000000..a838ad82fc0
--- /dev/null
+++ b/test/rules/std_L062_test.py
@@ -0,0 +1,20 @@
+"""Tests the python routines within L062."""
+from sqlfluff.core import FluffConfig
+from sqlfluff.core import Linter
+
+
+def test__rules__std_L062_raised() -> None:
+ """L062 is raised for use of blocked words with correct error message."""
+ sql = "SELECT MYOLDFUNCTION(col1) FROM deprecated_table;\n"
+ cfg = FluffConfig()
+ cfg.set_value(
+ config_path=["rules", "L062", "blocked_words"],
+ val="myoldfunction,deprecated_table",
+ )
+ linter = Linter(config=cfg)
+ result_records = linter.lint_string_wrapped(sql).as_records()
+ result = result_records[0]["violations"]
+
+ assert len(result) == 2
+ assert result[0]["description"] == "Use of blocked word 'MYOLDFUNCTION'."
+ assert result[1]["description"] == "Use of blocked word 'deprecated_table'."
| diff --git a/src/sqlfluff/core/default_config.cfg b/src/sqlfluff/core/default_config.cfg
index 2775cba87c9..8a9c5c91960 100644
--- a/src/sqlfluff/core/default_config.cfg
+++ b/src/sqlfluff/core/default_config.cfg
@@ -143,3 +143,7 @@ unquoted_identifiers_policy = all
quoted_identifiers_policy = all
allow_space_in_identifier = False
additional_allowed_characters = ""
+
+[sqlfluff:rules:L062]
+# Comma separated list of blocked words that should not be used
+blocked_words = None
| [
{
"components": [
{
"doc": "Block a list of configurable words from being used.\n\nThis generic rule can be useful to prevent certain keywords, functions, or objects\nfrom being used. Only whole words can be blocked, not phrases, nor parts of words.\n\nThis block list is case insensitive.\n\nExamp... | [
"test/rules/std_L062_test.py::test__rules__std_L062_raised"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Adds new rule L062 to allow blocking of certain words
<!--Thanks for adding this feature!-->
<!--Please give the Pull Request a meaningful title for the release notes-->
### Brief summary of the change made
<!--Please include `fixes #XXXX` to automatically close any corresponding issue when the pull request is merged. Alternatively if not fully closed you can say `makes progress on #XXXX`.-->
Fixes #361
Fixes #1800
Follow on from #2507
This adds a very configurable new rule to allow blocking of certain terms. To quote from the rule docstring:
This generic rule can be useful to prevent certain keywords, functions, or objects
from being used. Only whole words can be blocked, not phrases.
This block list is case insensitive.
Example use cases:
* We prefer ``BOOL`` over ``BOOLEAN`` and there is no existing rule to enforce
this. We can add ``BOOLEAN`` to the deny list until such a rule is written to
cause a linting error.
* We have deprecated a schema/table/function and want to prevent it being used
in future. We can add that to the denylist and then add a ``-- noqa: L062`` for
the few exceptions that still need to be in the code base.
### Are there any other side effects of this change that we should be aware of?
Can't think of any. Obviously people shouldn't block certain words (e.g. `SELECT`) but that's up to them.
### Pull Request checklist
- [x] Please confirm you have completed any of the necessary steps below.
- Included test cases to demonstrate any code changes, which may be one or more of the following:
- `.yml` rule test cases in `test/fixtures/rules/std_rule_cases`.
- `.sql`/`.yml` parser test cases in `test/fixtures/dialects` (note YML files can be auto generated with `tox -e generate-fixture-yml`).
- Full autofix test cases in `test/fixtures/linter/autofix`.
- Other.
- Added appropriate documentation for the change.
- Created GitHub issues for any relevant followup/future enhancements if appropriate.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/sqlfluff/rules/L062.py]
(definition of Rule_L062:)
class Rule_L062(BaseRule):
"""Block a list of configurable words from being used.
This generic rule can be useful to prevent certain keywords, functions, or objects
from being used. Only whole words can be blocked, not phrases, nor parts of words.
This block list is case insensitive.
Example use cases:
* We prefer ``BOOL`` over ``BOOLEAN`` and there is no existing rule to enforce
this. Until such a rule is written, we can add ``BOOLEAN`` to the deny list
to cause a linting error to flag this.
* We have deprecated a schema/table/function and want to prevent it being used
in future. We can add that to the denylist and then add a ``-- noqa: L062`` for
the few exceptions that still need to be in the code base for now.
| **Anti-pattern**
| If the ``blocked_words`` config is set to ``deprecated_table,bool`` then the
| following will flag:
.. code-block:: sql
SELECT * FROM deprecated_table WHERE 1 = 1;
CREATE TABLE myschema.t1 (a BOOL);
| **Best practice**
| Do not used any blocked words:
.. code-block:: sql
SELECT * FROM another_table WHERE 1 = 1;
CREATE TABLE myschema.t1 (a BOOLEAN);"""
(definition of Rule_L062._eval:)
def _eval(self, context: RuleContext) -> Optional[LintResult]:
(definition of Rule_L062._init_blocked_words:)
def _init_blocked_words(self):
"""Called first time rule is evaluated to fetch & cache the blocked_words."""
[end of new definitions in src/sqlfluff/rules/L062.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
New rule: database & schema restrictions
_Note: This is for general enhancements to the project. Please use the Bug report template instead to raise parsing/linting/syntax issues for existing supported dialects_
At some companies different databases & schemas serve different purposes. Some are secure. Others aren’t. Some are sandboxes not fit for production while others are for production. In order not to misuse data source it is a good idea to allow projects to prohibit usage of (or limit usage to) certain databases & schemas in their `.sqlfluff` file.
----------
@iajoiner you can remove the template text to make the issues more readable. I've edited them out of your current issues and raised #1792 to avoid this issue in future.
@alanmcruickshank here's another example as discussed in https://github.com/sqlfluff/sqlfluff/issues/361#issuecomment-955778171
--------------------
</issues> | ef27896c470cfb694e709d6407da57f22c10de86 |
Textualize__rich-1894 | 1,894 | Textualize/rich | null | 633faab16dc3a8c01a6562648cc2186c19a476e3 | 2022-01-31T14:32:01Z | diff --git a/rich/progress.py b/rich/progress.py
index 1f670db438..fe35b6c175 100644
--- a/rich/progress.py
+++ b/rich/progress.py
@@ -588,12 +588,7 @@ def __init__(
refresh_per_second is None or refresh_per_second > 0
), "refresh_per_second must be > 0"
self._lock = RLock()
- self.columns = columns or (
- TextColumn("[progress.description]{task.description}"),
- BarColumn(),
- TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
- TimeRemainingColumn(),
- )
+ self.columns = columns or self.get_default_columns()
self.speed_estimate_period = speed_estimate_period
self.disable = disable
@@ -613,6 +608,37 @@ def __init__(
self.print = self.console.print
self.log = self.console.log
+ @classmethod
+ def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
+ """Get the default columns used for a new Progress instance:
+ - a text column for the description (TextColumn)
+ - the bar itself (BarColumn)
+ - a text column showing completion percentage (TextColumn)
+ - an estimated-time-remaining column (TimeRemainingColumn)
+ If the Progress instance is created without passing a columns argument,
+ the default columns defined here will be used.
+
+ You can also create a Progress instance using custom columns before
+ and/or after the defaults, as in this example:
+
+ progress = Progress(
+ SpinnerColumn(),
+ *Progress.default_columns(),
+ "Elapsed:",
+ TimeElapsedColumn(),
+ )
+
+ This code shows the creation of a Progress display, containing
+ a spinner to the left, the default columns, and a labeled elapsed
+ time column.
+ """
+ return (
+ TextColumn("[progress.description]{task.description}"),
+ BarColumn(),
+ TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+ TimeRemainingColumn(),
+ )
+
@property
def console(self) -> Console:
return self.live.console
@@ -1015,10 +1041,7 @@ def remove_task(self, task_id: TaskID) -> None:
with Progress(
SpinnerColumn(),
- TextColumn("[progress.description]{task.description}"),
- BarColumn(),
- TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
- TimeRemainingColumn(),
+ *Progress.get_default_columns(),
TimeElapsedColumn(),
console=console,
transient=True,
| diff --git a/tests/test_progress.py b/tests/test_progress.py
index 2020f91ffb..20b9d32ed4 100644
--- a/tests/test_progress.py
+++ b/tests/test_progress.py
@@ -334,6 +334,32 @@ def test_columns() -> None:
assert result == expected
+def test_using_default_columns() -> None:
+ # can only check types, as the instances do not '==' each other
+ expected_default_types = [
+ TextColumn,
+ BarColumn,
+ TextColumn,
+ TimeRemainingColumn,
+ ]
+
+ progress = Progress()
+ assert [type(c) for c in progress.columns] == expected_default_types
+
+ progress = Progress(
+ SpinnerColumn(),
+ *Progress.get_default_columns(),
+ "Elapsed:",
+ TimeElapsedColumn(),
+ )
+ assert [type(c) for c in progress.columns] == [
+ SpinnerColumn,
+ *expected_default_types,
+ str,
+ TimeElapsedColumn,
+ ]
+
+
def test_task_create() -> None:
task = Task(TaskID(1), "foo", 100, 0, _get_time=lambda: 1)
assert task.elapsed is None
| [
{
"components": [
{
"doc": "Get the default columns used for a new Progress instance:\n - a text column for the description (TextColumn)\n - the bar itself (BarColumn)\n - a text column showing completion percentage (TextColumn)\n - an estimated-time-remaining column (TimeRemainingColumn)\... | [
"tests/test_progress.py::test_using_default_columns"
] | [
"tests/test_progress.py::test_bar_columns",
"tests/test_progress.py::test_text_column",
"tests/test_progress.py::test_time_elapsed_column",
"tests/test_progress.py::test_time_remaining_column",
"tests/test_progress.py::test_renderable_column",
"tests/test_progress.py::test_spinner_column",
"tests/test_p... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add default_columns classmethod to Progress class
## Type of changes
- [ ] Bug fix
- [x] New feature
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [ ] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [x] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
Added new default_columns() classmethod to the Progress class, so that client code does not need to replicate the defaults literally, but can just add `*Progress.default_columns()`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/progress.py]
(definition of Progress.get_default_columns:)
def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
"""Get the default columns used for a new Progress instance:
- a text column for the description (TextColumn)
- the bar itself (BarColumn)
- a text column showing completion percentage (TextColumn)
- an estimated-time-remaining column (TimeRemainingColumn)
If the Progress instance is created without passing a columns argument,
the default columns defined here will be used.
You can also create a Progress instance using custom columns before
and/or after the defaults, as in this example:
progress = Progress(
SpinnerColumn(),
*Progress.default_columns(),
"Elapsed:",
TimeElapsedColumn(),
)
This code shows the creation of a Progress display, containing
a spinner to the left, the default columns, and a labeled elapsed
time column."""
[end of new definitions in rich/progress.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | ||
Textualize__textual-246 | 246 | Textualize/textual | null | 3574a6da172c98a43f813033f39c610d5a3afd84 | 2022-01-31T13:04:35Z | diff --git a/src/textual/renderables/__init__.py b/src/textual/renderables/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/textual/renderables/underline_bar.py b/src/textual/renderables/underline_bar.py
new file mode 100644
index 0000000000..59c9e6bb40
--- /dev/null
+++ b/src/textual/renderables/underline_bar.py
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+from rich.console import ConsoleOptions, Console, RenderResult
+from rich.segment import Segment
+from rich.style import StyleType
+
+
+class UnderlineBar:
+ """Thin horizontal bar with a portion highlighted.
+
+ Args:
+ highlight_range (tuple[float, float]): The range to highlight. Defaults to ``(0, 0)`` (no highlight)
+ highlight_style (StyleType): The style of the highlighted range of the bar.
+ background_style (StyleType): The style of the non-highlighted range(s) of the bar.
+ width (int, optional): The width of the bar, or ``None`` to fill available width.
+ """
+
+ def __init__(
+ self,
+ highlight_range: tuple[float, float] = (0, 0),
+ highlight_style: StyleType = "magenta",
+ background_style: StyleType = "grey37",
+ width: int | None = None,
+ ) -> None:
+ self.highlight_range = highlight_range
+ self.highlight_style = highlight_style
+ self.background_style = background_style
+ self.width = width
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ highlight_style = console.get_style(self.highlight_style)
+ background_style = console.get_style(self.background_style)
+
+ half_bar_right = "╸"
+ half_bar_left = "╺"
+ bar = "━"
+
+ width = self.width or options.max_width
+ start, end = self.highlight_range
+
+ start = max(start, 0)
+ end = min(end, width)
+
+ if start == end == 0 or end < 0 or start > end:
+ yield Segment(bar * width, style=background_style)
+ return
+
+ # Round start and end to nearest half
+ start = round(start * 2) / 2
+ end = round(end * 2) / 2
+
+ # Check if we start/end on a number that rounds to a .5
+ half_start = start - int(start) > 0
+ half_end = end - int(end) > 0
+
+ # Initial non-highlighted portion of bar
+ yield Segment(bar * (int(start - 0.5)), style=background_style)
+ if not half_start and start > 0:
+ yield Segment(half_bar_right, style=background_style)
+
+ # The highlighted portion
+ bar_width = int(end) - int(start)
+ if half_start:
+ yield Segment(half_bar_left + bar * (bar_width - 1), style=highlight_style)
+ else:
+ yield Segment(bar * bar_width, style=highlight_style)
+ if half_end:
+ yield Segment(half_bar_right, style=highlight_style)
+
+ # The non-highlighted tail
+ if not half_end and end - width != 0:
+ yield Segment(half_bar_left, style=background_style)
+ yield Segment(bar * (int(width) - int(end) - 1), style=background_style)
+
+
+if __name__ == "__main__":
+ import random
+ from time import sleep
+ from rich.color import ANSI_COLOR_NAMES
+
+ console = Console()
+
+ def frange(start, end, step):
+ current = start
+ while current < end:
+ yield current
+ current += step
+
+ while current >= 0:
+ yield current
+ current -= step
+
+ step = 0.1
+ start_range = frange(0.5, 10.5, step)
+ end_range = frange(10, 20, step)
+ ranges = zip(start_range, end_range)
+
+ console.print(UnderlineBar(width=20), f" (.0, .0)")
+
+ for range in ranges:
+ color = random.choice(list(ANSI_COLOR_NAMES.keys()))
+ console.print(
+ UnderlineBar(
+ range,
+ highlight_style=color,
+ width=20,
+ ),
+ f" {range}",
+ )
+
+ from rich.live import Live
+
+ bar = UnderlineBar(width=80, highlight_range=(0, 4.5))
+ with Live(bar, refresh_per_second=60) as live:
+ while True:
+ bar.highlight_range = (
+ bar.highlight_range[0] + 0.1,
+ bar.highlight_range[1] + 0.1,
+ )
+ sleep(0.005)
| diff --git a/tests/renderables/__init__.py b/tests/renderables/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/renderables/test_underline_bar.py b/tests/renderables/test_underline_bar.py
new file mode 100644
index 0000000000..5c5e4de9ce
--- /dev/null
+++ b/tests/renderables/test_underline_bar.py
@@ -0,0 +1,126 @@
+from tests.utilities.render import render
+from textual.renderables.underline_bar import UnderlineBar
+
+MAGENTA = "\x1b[35m"
+GREY = "\x1b[38;5;59m"
+STOP = "\x1b[0m"
+GREEN = "\x1b[32m"
+RED = "\x1b[31m"
+
+
+def test_no_highlight():
+ bar = UnderlineBar(width=6)
+ assert render(bar) == f"{GREY}━━━━━━{STOP}"
+
+
+def test_highlight_from_zero():
+ bar = UnderlineBar(highlight_range=(0, 2.5), width=6)
+ assert render(bar) == (
+ f"{MAGENTA}━━{STOP}{MAGENTA}╸{STOP}{GREY}━━━{STOP}"
+ )
+
+
+def test_highlight_from_zero_point_five():
+ bar = UnderlineBar(highlight_range=(0.5, 2), width=6)
+ assert render(bar) == (
+ f"{MAGENTA}╺━{STOP}{GREY}╺{STOP}{GREY}━━━{STOP}"
+ )
+
+
+def test_highlight_middle():
+ bar = UnderlineBar(highlight_range=(2, 4), width=6)
+ assert render(bar) == (
+ f"{GREY}━{STOP}"
+ f"{GREY}╸{STOP}"
+ f"{MAGENTA}━━{STOP}"
+ f"{GREY}╺{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_half_start():
+ bar = UnderlineBar(highlight_range=(2.5, 4), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}"
+ f"{MAGENTA}╺━{STOP}"
+ f"{GREY}╺{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_half_end():
+ bar = UnderlineBar(highlight_range=(2, 4.5), width=6)
+ assert render(bar) == (
+ f"{GREY}━{STOP}"
+ f"{GREY}╸{STOP}"
+ f"{MAGENTA}━━{STOP}"
+ f"{MAGENTA}╸{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_half_start_and_half_end():
+ bar = UnderlineBar(highlight_range=(2.5, 4.5), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}"
+ f"{MAGENTA}╺━{STOP}"
+ f"{MAGENTA}╸{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_to_near_end():
+ bar = UnderlineBar(highlight_range=(3, 5.5), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}"
+ f"{GREY}╸{STOP}"
+ f"{MAGENTA}━━{STOP}"
+ f"{MAGENTA}╸{STOP}"
+ )
+
+
+def test_highlight_to_end():
+ bar = UnderlineBar(highlight_range=(3, 6), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}{GREY}╸{STOP}{MAGENTA}━━━{STOP}"
+ )
+
+
+def test_highlight_out_of_bounds_start():
+ bar = UnderlineBar(highlight_range=(-2, 3), width=6)
+ assert render(bar) == (
+ f"{MAGENTA}━━━{STOP}{GREY}╺{STOP}{GREY}━━{STOP}"
+ )
+
+
+def test_highlight_out_of_bounds_end():
+ bar = UnderlineBar(highlight_range=(3, 9), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}{GREY}╸{STOP}{MAGENTA}━━━{STOP}"
+ )
+
+
+def test_highlight_full_range_out_of_bounds_end():
+ bar = UnderlineBar(highlight_range=(9, 10), width=6)
+ assert render(bar) == f"{GREY}━━━━━━{STOP}"
+
+
+def test_highlight_full_range_out_of_bounds_start():
+ bar = UnderlineBar(highlight_range=(-5, -2), width=6)
+ assert render(bar) == f"{GREY}━━━━━━{STOP}"
+
+
+def test_custom_styles():
+ bar = UnderlineBar(
+ highlight_range=(2, 4),
+ highlight_style="red",
+ background_style="green",
+ width=6
+ )
+ assert render(bar) == (
+ f"{GREEN}━{STOP}"
+ f"{GREEN}╸{STOP}"
+ f"{RED}━━{STOP}"
+ f"{GREEN}╺{STOP}"
+ f"{GREEN}━{STOP}"
+ )
diff --git a/tests/utilities/render.py b/tests/utilities/render.py
new file mode 100644
index 0000000000..a2435c542a
--- /dev/null
+++ b/tests/utilities/render.py
@@ -0,0 +1,24 @@
+import io
+import re
+
+from rich.console import Console, RenderableType
+
+
+re_link_ids = re.compile(r"id=[\d\.\-]*?;.*?\x1b")
+
+
+def replace_link_ids(render: str) -> str:
+ """Link IDs have a random ID and system path which is a problem for
+ reproducible tests.
+
+ """
+ return re_link_ids.sub("id=0;foo\x1b", render)
+
+
+def render(renderable: RenderableType, no_wrap: bool = False) -> str:
+ console = Console(
+ width=100, file=io.StringIO(), color_system="truecolor", legacy_windows=False
+ )
+ console.print(renderable, no_wrap=no_wrap)
+ output = replace_link_ids(console.file.getvalue())
+ return output
| [
{
"components": [
{
"doc": "Thin horizontal bar with a portion highlighted.\n\nArgs:\n highlight_range (tuple[float, float]): The range to highlight. Defaults to ``(0, 0)`` (no highlight)\n highlight_style (StyleType): The style of the highlighted range of the bar.\n background_style (Sty... | [
"tests/renderables/test_underline_bar.py::test_no_highlight",
"tests/renderables/test_underline_bar.py::test_highlight_from_zero",
"tests/renderables/test_underline_bar.py::test_highlight_from_zero_point_five",
"tests/renderables/test_underline_bar.py::test_highlight_middle",
"tests/renderables/test_underli... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Underline bar renderable
Naming is hard :) more than happy to hear suggestions for the params etc.
Closes #238
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/textual/renderables/underline_bar.py]
(definition of UnderlineBar:)
class UnderlineBar:
"""Thin horizontal bar with a portion highlighted.
Args:
highlight_range (tuple[float, float]): The range to highlight. Defaults to ``(0, 0)`` (no highlight)
highlight_style (StyleType): The style of the highlighted range of the bar.
background_style (StyleType): The style of the non-highlighted range(s) of the bar.
width (int, optional): The width of the bar, or ``None`` to fill available width."""
(definition of UnderlineBar.__init__:)
def __init__( self, highlight_range: tuple[float, float] = (0, 0), highlight_style: StyleType = "magenta", background_style: StyleType = "grey37", width: int | None = None, ) -> None:
(definition of UnderlineBar.__rich_console__:)
def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult:
(definition of frange:)
def frange(start, end, step):
[end of new definitions in src/textual/renderables/underline_bar.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Implement a bar renderable for tabbed dialogs
Implement a renderable used to underline tabs in a tabbed dialog.
This renderable should use the same unicode characters as Rich progress bars. It should render a line with a portion in a different color extending from p1 to p2.
Also add the option to not render the highlight and just the background bar.
Suggested interface:
```python
class TabUnderline:
def __init__(self, highlight: tuple[float, float] | None = None, color1: Color, color2: Color):
...
```
----------
--------------------
</issues> | 86e93536b991014e0ea4bf993068202b446bb698 | |
sympy__sympy-22961 | 22,961 | sympy/sympy | 1.10 | 3e8695add7a25c8d70aeba7d6137496df02863fd | 2022-01-30T11:37:34Z | diff --git a/sympy/tensor/array/expressions/__init__.py b/sympy/tensor/array/expressions/__init__.py
index d465a542cc64..a45d425018aa 100644
--- a/sympy/tensor/array/expressions/__init__.py
+++ b/sympy/tensor/array/expressions/__init__.py
@@ -164,6 +164,7 @@
"Reshape",
"convert_array_to_matrix",
"convert_matrix_to_array",
+ "convert_array_to_indexed",
"convert_indexed_to_array",
"array_derive",
]
@@ -171,6 +172,7 @@
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayAdd, PermuteDims, ArrayDiagonal, \
ArrayContraction, Reshape, ArraySymbol, ArrayElement, ZeroArray, OneArray, ArrayElementwiseApplyFunc
from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive
+from sympy.tensor.array.expressions.conv_array_to_indexed import convert_array_to_indexed
from sympy.tensor.array.expressions.conv_array_to_matrix import convert_array_to_matrix
from sympy.tensor.array.expressions.conv_indexed_to_array import convert_indexed_to_array
from sympy.tensor.array.expressions.conv_matrix_to_array import convert_matrix_to_array
diff --git a/sympy/tensor/array/expressions/array_expressions.py b/sympy/tensor/array/expressions/array_expressions.py
index 282ae7dcf863..5af09a134b46 100644
--- a/sympy/tensor/array/expressions/array_expressions.py
+++ b/sympy/tensor/array/expressions/array_expressions.py
@@ -37,7 +37,16 @@
class _ArrayExpr(Expr):
- shape : tTuple[Expr, ...]
+ shape: tTuple[Expr, ...]
+
+ def __getitem__(self, item):
+ if not isinstance(item, collections.abc.Iterable):
+ item = (item,)
+ ArrayElement._check_shape(self, item)
+ return self._get(item)
+
+ def _get(self, item):
+ return _get_array_element_or_slice(self, item)
class ArraySymbol(_ArrayExpr):
@@ -61,9 +70,6 @@ def name(self):
def shape(self):
return self._args[1]
- def __getitem__(self, item):
- return ArrayElement(self, item)
-
def as_explicit(self):
if not all(i.is_Integer for i in self.shape):
raise ValueError("cannot express explicit array with symbolic shape")
@@ -71,7 +77,7 @@ def as_explicit(self):
return ImmutableDenseNDimArray(data).reshape(*self.shape)
-class ArrayElement(_ArrayExpr):
+class ArrayElement(Expr):
"""
An element of an array.
"""
@@ -87,13 +93,21 @@ def __new__(cls, name, indices):
if not isinstance(indices, collections.abc.Iterable):
indices = (indices,)
indices = _sympify(tuple(indices))
+ cls._check_shape(name, indices)
+ obj = Expr.__new__(cls, name, indices)
+ return obj
+
+ @classmethod
+ def _check_shape(cls, name, indices):
+ indices = tuple(indices)
if hasattr(name, "shape"):
+ index_error = IndexError("number of indices does not match shape of the array")
+ if len(indices) != len(name.shape):
+ raise index_error
if any((i >= s) == True for i, s in zip(indices, name.shape)):
raise ValueError("shape is out of bounds")
if any((i < 0) == True for i in indices):
raise ValueError("shape contains negative values")
- obj = Expr.__new__(cls, name, indices)
- return obj
@property
def name(self):
@@ -137,6 +151,9 @@ def as_explicit(self):
raise ValueError("Cannot return explicit form for symbolic shape.")
return ImmutableDenseNDimArray.zeros(*self.shape)
+ def _get(self, item):
+ return S.Zero
+
class OneArray(_ArrayExpr):
"""
@@ -159,6 +176,9 @@ def as_explicit(self):
raise ValueError("Cannot return explicit form for symbolic shape.")
return ImmutableDenseNDimArray([S.One for i in range(reduce(operator.mul, self.shape))]).reshape(*self.shape)
+ def _get(self, item):
+ return S.One
+
class _CodegenArrayAbstract(Basic):
@@ -1916,3 +1936,7 @@ def _permute_dims(expr, permutation, **kwargs):
def _array_add(*args, **kwargs):
return ArrayAdd(*args, canonicalize=True, **kwargs)
+
+
+def _get_array_element_or_slice(expr, indices):
+ return ArrayElement(expr, indices)
diff --git a/sympy/tensor/array/expressions/conv_array_to_indexed.py b/sympy/tensor/array/expressions/conv_array_to_indexed.py
new file mode 100644
index 000000000000..676620097ad1
--- /dev/null
+++ b/sympy/tensor/array/expressions/conv_array_to_indexed.py
@@ -0,0 +1,63 @@
+import collections.abc
+from itertools import accumulate
+
+from sympy import Mul, Sum, Dummy, Add
+from sympy.tensor.array.expressions import PermuteDims, ArrayAdd, ArrayElementwiseApplyFunc
+from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, get_rank, ArrayContraction, \
+ ArrayDiagonal, get_shape, _get_array_element_or_slice, _ArrayExpr
+from sympy.tensor.array.expressions.utils import _apply_permutation_to_list
+
+
+def convert_array_to_indexed(expr, indices):
+ return _ConvertArrayToIndexed().do_convert(expr, indices)
+
+
+class _ConvertArrayToIndexed:
+
+ def __init__(self):
+ self.count_dummies: int = 0
+
+ def do_convert(self, expr, indices):
+ if isinstance(expr, ArrayTensorProduct):
+ cumul = list(accumulate([0] + [get_rank(arg) for arg in expr.args]))
+ indices_grp = [indices[cumul[i]:cumul[i+1]] for i in range(len(expr.args))]
+ return Mul.fromiter(self.do_convert(arg, ind) for arg, ind in zip(expr.args, indices_grp))
+ if isinstance(expr, ArrayContraction):
+ new_indices = [None for i in range(get_rank(expr.expr))]
+ limits = []
+ bottom_shape = get_shape(expr.expr)
+ for contraction_index_grp in expr.contraction_indices:
+ d = Dummy(f"d{self.count_dummies}")
+ self.count_dummies += 1
+ dim = bottom_shape[contraction_index_grp[0]]
+ limits.append((d, 0, dim-1))
+ for i in contraction_index_grp:
+ new_indices[i] = d
+ j = 0
+ for i in range(len(new_indices)):
+ if new_indices[i] is None:
+ new_indices[i] = indices[j]
+ j += 1
+ newexpr = self.do_convert(expr.expr, new_indices)
+ return Sum(newexpr, *limits)
+ if isinstance(expr, ArrayDiagonal):
+ new_indices = [None for i in range(get_rank(expr.expr))]
+ ind_pos = expr._push_indices_down(expr.diagonal_indices, list(range(len(indices))), get_rank(expr))
+ for i, index in zip(ind_pos, indices):
+ if isinstance(i, collections.abc.Iterable):
+ for j in i:
+ new_indices[j] = index
+ else:
+ new_indices[i] = index
+ newexpr = self.do_convert(expr.expr, new_indices)
+ return newexpr
+ if isinstance(expr, PermuteDims):
+ permuted_indices = _apply_permutation_to_list(expr.permutation, indices)
+ return self.do_convert(expr.expr, permuted_indices)
+ if isinstance(expr, ArrayAdd):
+ return Add.fromiter(self.do_convert(arg, indices) for arg in expr.args)
+ if isinstance(expr, _ArrayExpr):
+ return expr.__getitem__(tuple(indices))
+ if isinstance(expr, ArrayElementwiseApplyFunc):
+ return expr.function(self.do_convert(expr.expr, indices))
+ return _get_array_element_or_slice(expr, indices)
diff --git a/sympy/tensor/array/expressions/conv_indexed_to_array.py b/sympy/tensor/array/expressions/conv_indexed_to_array.py
index e88ca2bbd6a6..52c9572ef111 100644
--- a/sympy/tensor/array/expressions/conv_indexed_to_array.py
+++ b/sympy/tensor/array/expressions/conv_indexed_to_array.py
@@ -1,5 +1,6 @@
from collections import defaultdict
+from sympy import Function
from sympy.combinatorics.permutations import _af_invert
from sympy.concrete.summations import Sum
from sympy.core.add import Add
@@ -8,6 +9,7 @@
from sympy.core.power import Pow
from sympy.core.sorting import default_sort_key
from sympy.functions.special.tensor_functions import KroneckerDelta
+from sympy.tensor.array.expressions import ArrayElementwiseApplyFunc
from sympy.tensor.indexed import (Indexed, IndexedBase)
from sympy.combinatorics import Permutation
from sympy.matrices.expressions.matexpr import MatrixElement
@@ -249,4 +251,7 @@ def _convert_indexed_to_array(expr):
diags = zip(*[(2*i, 2*i + 1) for i in range(expr.exp)])
arr = _array_diagonal(_array_tensor_product(*[subexpr for i in range(expr.exp)]), *diags)
return arr, subindices
+ if isinstance(expr, Function):
+ subexpr, subindices = _convert_indexed_to_array(expr.args[0])
+ return ArrayElementwiseApplyFunc(type(expr), subexpr), subindices
return expr, ()
diff --git a/sympy/tensor/array/expressions/utils.py b/sympy/tensor/array/expressions/utils.py
index 82df0b05a4f0..e55c0e6ed47c 100644
--- a/sympy/tensor/array/expressions/utils.py
+++ b/sympy/tensor/array/expressions/utils.py
@@ -1,6 +1,7 @@
import bisect
from collections import defaultdict
+from sympy.combinatorics import Permutation
from sympy.core.containers import Tuple
from sympy.core.numbers import Integer
@@ -110,3 +111,13 @@ def transform(j):
return j + shifts[-1] - len(shifts) + 1
return transform
+
+
+def _apply_permutation_to_list(perm: Permutation, target_list: list):
+ """
+ Permute a list according to the given permutation.
+ """
+ new_list = [None for i in range(perm.size)]
+ for i, e in enumerate(target_list):
+ new_list[perm(i)] = e
+ return new_list
| diff --git a/sympy/tensor/array/expressions/tests/test_array_expressions.py b/sympy/tensor/array/expressions/tests/test_array_expressions.py
index 206f981296d7..ee7ea4051658 100644
--- a/sympy/tensor/array/expressions/tests/test_array_expressions.py
+++ b/sympy/tensor/array/expressions/tests/test_array_expressions.py
@@ -62,6 +62,23 @@ def test_array_symbol_and_element():
p = _permute_dims(A, Permutation(0, 2, 1))
assert isinstance(p, PermuteDims)
+ A = ArraySymbol("A", (2,))
+ raises(IndexError, lambda: A[()])
+ raises(IndexError, lambda: A[0, 1])
+ raises(ValueError, lambda: A[-1])
+ raises(ValueError, lambda: A[2])
+
+ O = OneArray(3, 4)
+ Z = ZeroArray(m, n)
+
+ raises(IndexError, lambda: O[()])
+ raises(IndexError, lambda: O[1, 2, 3])
+ raises(ValueError, lambda: O[3, 0])
+ raises(ValueError, lambda: O[0, 4])
+
+ assert O[1, 2] == 1
+ assert Z[1, 2] == 0
+
def test_zero_array():
assert ZeroArray() == 0
diff --git a/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py b/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py
new file mode 100644
index 000000000000..4a8e3c332eb5
--- /dev/null
+++ b/sympy/tensor/array/expressions/tests/test_convert_array_to_indexed.py
@@ -0,0 +1,46 @@
+from sympy import Sum, Dummy, sin
+from sympy.tensor.array.expressions import ArraySymbol, ArrayTensorProduct, ArrayContraction, PermuteDims, \
+ ArrayDiagonal, ArrayAdd, OneArray, ZeroArray, convert_indexed_to_array, ArrayElementwiseApplyFunc
+from sympy.tensor.array.expressions.conv_array_to_indexed import convert_array_to_indexed
+
+from sympy.abc import i, j, k, l, m, n, o
+
+
+def test_convert_array_to_indexed_main():
+ A = ArraySymbol("A", (3, 3, 3))
+ B = ArraySymbol("B", (3, 3))
+ C = ArraySymbol("C", (3, 3))
+
+ d_ = Dummy("d_")
+
+ assert convert_array_to_indexed(A, [i, j, k]) == A[i, j, k]
+
+ expr = ArrayTensorProduct(A, B, C)
+ conv = convert_array_to_indexed(expr, [i,j,k,l,m,n,o])
+ assert conv == A[i,j,k]*B[l,m]*C[n,o]
+ assert convert_indexed_to_array(conv, [i,j,k,l,m,n,o]) == expr
+
+ expr = ArrayContraction(A, (0, 2))
+ assert convert_array_to_indexed(expr, [i]).dummy_eq(Sum(A[d_, i, d_], (d_, 0, 2)))
+
+ expr = ArrayDiagonal(A, (0, 2))
+ assert convert_array_to_indexed(expr, [i, j]) == A[j, i, j]
+
+ A = ArraySymbol("A", (1, 2, 3))
+ expr = PermuteDims(A, [1, 2, 0])
+ conv = convert_array_to_indexed(expr, [i, j, k])
+ assert conv == A[k, i, j]
+ assert convert_indexed_to_array(conv, [i, j, k]) == expr
+
+ expr = ArrayAdd(B, C, PermuteDims(C, [1, 0]))
+ conv = convert_array_to_indexed(expr, [i, j])
+ assert conv == B[i, j] + C[i, j] + C[j, i]
+ assert convert_indexed_to_array(conv, [i, j]) == expr
+
+ expr = ArrayElementwiseApplyFunc(sin, A)
+ conv = convert_array_to_indexed(expr, [i, j, k])
+ assert conv == sin(A[i, j, k])
+ assert convert_indexed_to_array(conv, [i, j, k]).dummy_eq(expr)
+
+ assert convert_array_to_indexed(OneArray(3, 3), [i, j]) == 1
+ assert convert_array_to_indexed(ZeroArray(3, 3), [i, j]) == 0
diff --git a/sympy/tensor/array/expressions/tests/test_convert_index_to_array.py b/sympy/tensor/array/expressions/tests/test_convert_index_to_array.py
index a9cc2f382e1f..aaea09ac82b7 100644
--- a/sympy/tensor/array/expressions/tests/test_convert_index_to_array.py
+++ b/sympy/tensor/array/expressions/tests/test_convert_index_to_array.py
@@ -1,8 +1,10 @@
+from sympy import tanh
from sympy.concrete.summations import Sum
from sympy.core.symbol import symbols
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.matrices.expressions.special import Identity
+from sympy.tensor.array.expressions import ArrayElementwiseApplyFunc
from sympy.tensor.indexed import IndexedBase
from sympy.combinatorics import Permutation
from sympy.tensor.array.expressions.array_expressions import ArrayContraction, ArrayTensorProduct, \
@@ -118,6 +120,10 @@ def test_arrayexpr_convert_array_element_to_array_expression():
cg = convert_indexed_to_array(s, [j, i])
assert cg == ArrayTensorProduct(B, A)
+ s = tanh(A[i]*B[j])
+ cg = convert_indexed_to_array(s, [i, j])
+ assert cg.dummy_eq(ArrayElementwiseApplyFunc(tanh, ArrayTensorProduct(A, B)))
+
def test_arrayexpr_convert_indexed_to_array_and_back_to_matrix():
| [
{
"components": [
{
"doc": "",
"lines": [
42,
46
],
"name": "_ArrayExpr.__getitem__",
"signature": "def __getitem__(self, item):",
"type": "function"
},
{
"doc": "",
"lines": [
48,
49
... | [
"test_array_symbol_and_element",
"test_arrayexpr_convert_array_element_to_array_expression"
] | [
"test_zero_array",
"test_one_array",
"test_arrayexpr_contraction_construction",
"test_arrayexpr_array_flatten",
"test_arrayexpr_array_diagonal",
"test_arrayexpr_array_shape",
"test_arrayexpr_permutedims_sink",
"test_arrayexpr_push_indices_up_and_down",
"test_arrayexpr_split_multiple_contractions",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added converter from array expression to indexed
Added function `convert_array_to_indexed( ... )` that is able to convert array expressions into an equivalent indexed expression.
<!-- BEGIN RELEASE NOTES -->
NO ENTRY
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/tensor/array/expressions/array_expressions.py]
(definition of _ArrayExpr.__getitem__:)
def __getitem__(self, item):
(definition of _ArrayExpr._get:)
def _get(self, item):
(definition of ArrayElement._check_shape:)
def _check_shape(cls, name, indices):
(definition of ZeroArray._get:)
def _get(self, item):
(definition of OneArray._get:)
def _get(self, item):
(definition of _get_array_element_or_slice:)
def _get_array_element_or_slice(expr, indices):
[end of new definitions in sympy/tensor/array/expressions/array_expressions.py]
[start of new definitions in sympy/tensor/array/expressions/conv_array_to_indexed.py]
(definition of convert_array_to_indexed:)
def convert_array_to_indexed(expr, indices):
(definition of _ConvertArrayToIndexed:)
class _ConvertArrayToIndexed:
(definition of _ConvertArrayToIndexed.__init__:)
def __init__(self):
(definition of _ConvertArrayToIndexed.do_convert:)
def do_convert(self, expr, indices):
[end of new definitions in sympy/tensor/array/expressions/conv_array_to_indexed.py]
[start of new definitions in sympy/tensor/array/expressions/utils.py]
(definition of _apply_permutation_to_list:)
def _apply_permutation_to_list(perm: Permutation, target_list: list):
"""Permute a list according to the given permutation."""
[end of new definitions in sympy/tensor/array/expressions/utils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3e8695add7a25c8d70aeba7d6137496df02863fd | ||
pvlib__pvlib-python-1395 | 1,395 | pvlib/pvlib-python | 0.8 | 26579bec7e65296223503b9e05da4af914af6777 | 2022-01-26T20:24:16Z | diff --git a/docs/sphinx/source/reference/iotools.rst b/docs/sphinx/source/reference/iotools.rst
index 514aeac2f5..14271cf3ee 100644
--- a/docs/sphinx/source/reference/iotools.rst
+++ b/docs/sphinx/source/reference/iotools.rst
@@ -31,6 +31,7 @@ of sources and file formats relevant to solar energy modeling.
iotools.read_pvgis_tmy
iotools.get_pvgis_hourly
iotools.read_pvgis_hourly
+ iotools.get_pvgis_horizon
iotools.get_bsrn
iotools.read_bsrn
iotools.parse_bsrn
diff --git a/docs/sphinx/source/whatsnew/v0.9.6.rst b/docs/sphinx/source/whatsnew/v0.9.6.rst
index 0950002edb..2401317532 100644
--- a/docs/sphinx/source/whatsnew/v0.9.6.rst
+++ b/docs/sphinx/source/whatsnew/v0.9.6.rst
@@ -15,6 +15,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+* Added function to retrieve horizon data from PVGIS
+ :py:func:`pvlib.iotools.get_pvgis_horizon`. (:issue:`1290`, :pull:`1395`)
* Added ``map_variables`` argument to the :py:func:`pvlib.iotools.read_tmy3` in
order to offer the option of mapping column names to standard pvlib names.
(:issue:`1517`, :pull:`1623`)
@@ -25,7 +27,6 @@ Enhancements
* :py:func:`pvlib.iotools.get_psm3` now uses the new NSRDB 3.2.2 endpoint for
hourly and half-hourly single-year datasets. (:issue:`1591`, :pull:`1736`)
-
Bug fixes
~~~~~~~~~
* `data` can no longer be left unspecified in
@@ -53,9 +54,17 @@ Contributors
~~~~~~~~~~~~
* Lakshya Garg (:ghuser:`Lakshyadevelops`)
* Adam R. Jensen (:ghuser:`adamrjensen`)
+* Ben Pierce (:ghuser:`bgpierc`)
+* Joseph Palakapilly (:ghuser:`JPalakapillyKWH`)
+* Cliff Hansen (:ghuser:`cwhanse`)
+* Anton Driesse (:ghuser:`adriesse`)
+* Will Holmgren (:ghuser:`wholmgren`)
+* Mark Mikofski (:ghuser:`mikofski`)
+* Karel De Brabandere (:ghuser:`kdebrab`)
+* Josh Stein (:ghuser:`jsstein`)
+* Kevin Anderson (:ghuser:`kandersolar`)
* Siddharth Kaul (:ghuser:`k10blogger`)
* Kshitiz Gupta (:ghuser:`kshitiz305`)
* Stefan de Lange (:ghuser:`langestefan`)
* :ghuser:`ooprathamm`
* Kevin Anderson (:ghuser:`kandersolar`)
-
diff --git a/pvlib/iotools/__init__.py b/pvlib/iotools/__init__.py
index b02ce243ae..6f6a254a60 100644
--- a/pvlib/iotools/__init__.py
+++ b/pvlib/iotools/__init__.py
@@ -15,6 +15,7 @@
from pvlib.iotools.pvgis import get_pvgis_tmy, read_pvgis_tmy # noqa: F401
from pvlib.iotools.pvgis import read_pvgis_hourly # noqa: F401
from pvlib.iotools.pvgis import get_pvgis_hourly # noqa: F401
+from pvlib.iotools.pvgis import get_pvgis_horizon # noqa: F401
from pvlib.iotools.bsrn import get_bsrn # noqa: F401
from pvlib.iotools.bsrn import read_bsrn # noqa: F401
from pvlib.iotools.bsrn import parse_bsrn # noqa: F401
diff --git a/pvlib/iotools/pvgis.py b/pvlib/iotools/pvgis.py
index edfb28c124..16bfee7cee 100644
--- a/pvlib/iotools/pvgis.py
+++ b/pvlib/iotools/pvgis.py
@@ -665,3 +665,57 @@ def read_pvgis_tmy(filename, pvgis_format=None, map_variables=None):
data = data.rename(columns=VARIABLE_MAP)
return data, months_selected, inputs, meta
+
+
+def get_pvgis_horizon(latitude, longitude, url=URL, **kwargs):
+ """Get horizon data from PVGIS.
+
+ Parameters
+ ----------
+ latitude : float
+ Latitude in degrees north
+ longitude : float
+ Longitude in degrees east
+ url: str, default: :const:`pvlib.iotools.pvgis.URL`
+ Base URL for PVGIS
+ kwargs:
+ Passed to requests.get
+
+ Returns
+ -------
+ data : pd.Series
+ Pandas Series of the retrived horizon elevation angles. Index is the
+ corresponding horizon azimuth angles.
+ metadata : dict
+ Metadata returned by PVGIS.
+
+ Notes
+ -----
+ The horizon azimuths are specified clockwise from north, e.g., south=180.
+ This is the standard pvlib convention, although the PVGIS website specifies
+ south=0.
+
+ References
+ ----------
+ .. [1] `PVGIS horizon profile tool
+ <https://ec.europa.eu/jrc/en/PVGIS/tools/horizon>`_
+ """
+ params = {'lat': latitude, 'lon': longitude, 'outputformat': 'json'}
+ res = requests.get(url + 'printhorizon', params=params, **kwargs)
+ if not res.ok:
+ try:
+ err_msg = res.json()
+ except Exception:
+ res.raise_for_status()
+ else:
+ raise requests.HTTPError(err_msg['message'])
+ json_output = res.json()
+ metadata = json_output['meta']
+ data = pd.DataFrame(json_output['outputs']['horizon_profile'])
+ data.columns = ['horizon_azimuth', 'horizon_elevation']
+ # Convert azimuth to pvlib convention (north=0, south=180)
+ data['horizon_azimuth'] += 180
+ data.set_index('horizon_azimuth', inplace=True)
+ data = data['horizon_elevation'] # convert to pd.Series
+ data = data[data.index < 360] # remove duplicate north point (0 and 360)
+ return data, metadata
| diff --git a/pvlib/tests/iotools/test_pvgis.py b/pvlib/tests/iotools/test_pvgis.py
index 579c26914c..a5a5e3fbd7 100644
--- a/pvlib/tests/iotools/test_pvgis.py
+++ b/pvlib/tests/iotools/test_pvgis.py
@@ -9,8 +9,9 @@
import requests
from pvlib.iotools import get_pvgis_tmy, read_pvgis_tmy
from pvlib.iotools import get_pvgis_hourly, read_pvgis_hourly
+from pvlib.iotools import get_pvgis_horizon
from ..conftest import (DATA_DIR, RERUNS, RERUNS_DELAY, assert_frame_equal,
- fail_on_pvlib_version)
+ fail_on_pvlib_version, assert_series_equal)
from pvlib._deprecation import pvlibDeprecationWarning
@@ -509,6 +510,23 @@ def test_get_pvgis_map_variables(pvgis_tmy_mapped_columns):
assert all([c in pvgis_tmy_mapped_columns for c in actual.columns])
+@pytest.mark.remote_data
+@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
+def test_read_pvgis_horizon():
+ pvgis_data, _ = get_pvgis_horizon(35.171051, -106.465158)
+ horizon_data = pd.read_csv(DATA_DIR / 'test_read_pvgis_horizon.csv',
+ index_col=0)
+ horizon_data = horizon_data['horizon_elevation']
+ assert_series_equal(pvgis_data, horizon_data)
+
+
+@pytest.mark.remote_data
+@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
+def test_read_pvgis_horizon_invalid_coords():
+ with pytest.raises(requests.HTTPError, match='lat: Incorrect value'):
+ _, _ = get_pvgis_horizon(100, 50) # unfeasible latitude
+
+
def test_read_pvgis_tmy_map_variables(pvgis_tmy_mapped_columns):
fn = DATA_DIR / 'tmy_45.000_8.000_2005_2016.json'
actual, _, _, _ = read_pvgis_tmy(fn, map_variables=True)
| diff --git a/docs/sphinx/source/reference/iotools.rst b/docs/sphinx/source/reference/iotools.rst
index 514aeac2f5..14271cf3ee 100644
--- a/docs/sphinx/source/reference/iotools.rst
+++ b/docs/sphinx/source/reference/iotools.rst
@@ -31,6 +31,7 @@ of sources and file formats relevant to solar energy modeling.
iotools.read_pvgis_tmy
iotools.get_pvgis_hourly
iotools.read_pvgis_hourly
+ iotools.get_pvgis_horizon
iotools.get_bsrn
iotools.read_bsrn
iotools.parse_bsrn
diff --git a/docs/sphinx/source/whatsnew/v0.9.6.rst b/docs/sphinx/source/whatsnew/v0.9.6.rst
index 0950002edb..2401317532 100644
--- a/docs/sphinx/source/whatsnew/v0.9.6.rst
+++ b/docs/sphinx/source/whatsnew/v0.9.6.rst
@@ -15,6 +15,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+* Added function to retrieve horizon data from PVGIS
+ :py:func:`pvlib.iotools.get_pvgis_horizon`. (:issue:`1290`, :pull:`1395`)
* Added ``map_variables`` argument to the :py:func:`pvlib.iotools.read_tmy3` in
order to offer the option of mapping column names to standard pvlib names.
(:issue:`1517`, :pull:`1623`)
@@ -25,7 +27,6 @@ Enhancements
* :py:func:`pvlib.iotools.get_psm3` now uses the new NSRDB 3.2.2 endpoint for
hourly and half-hourly single-year datasets. (:issue:`1591`, :pull:`1736`)
-
Bug fixes
~~~~~~~~~
* `data` can no longer be left unspecified in
@@ -53,9 +54,17 @@ Contributors
~~~~~~~~~~~~
* Lakshya Garg (:ghuser:`Lakshyadevelops`)
* Adam R. Jensen (:ghuser:`adamrjensen`)
+* Ben Pierce (:ghuser:`bgpierc`)
+* Joseph Palakapilly (:ghuser:`JPalakapillyKWH`)
+* Cliff Hansen (:ghuser:`cwhanse`)
+* Anton Driesse (:ghuser:`adriesse`)
+* Will Holmgren (:ghuser:`wholmgren`)
+* Mark Mikofski (:ghuser:`mikofski`)
+* Karel De Brabandere (:ghuser:`kdebrab`)
+* Josh Stein (:ghuser:`jsstein`)
+* Kevin Anderson (:ghuser:`kandersolar`)
* Siddharth Kaul (:ghuser:`k10blogger`)
* Kshitiz Gupta (:ghuser:`kshitiz305`)
* Stefan de Lange (:ghuser:`langestefan`)
* :ghuser:`ooprathamm`
* Kevin Anderson (:ghuser:`kandersolar`)
-
| [
{
"components": [
{
"doc": "Get horizon data from PVGIS.\n\nParameters\n----------\nlatitude : float\n Latitude in degrees north\nlongitude : float\n Longitude in degrees east\nurl: str, default: :const:`pvlib.iotools.pvgis.URL`\n Base URL for PVGIS\nkwargs:\n Passed to requests.get\n\... | [
"pvlib/tests/iotools/test_pvgis.py::test_read_pvgis_hourly[testfile0-expected_radiation_csv-metadata_exp0-inputs_exp0-False-None]",
"pvlib/tests/iotools/test_pvgis.py::test_read_pvgis_hourly[testfile1-expected_radiation_csv_mapped-metadata_exp1-inputs_exp1-True-csv]",
"pvlib/tests/iotools/test_pvgis.py::test_re... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Adding PVGIS horizon data retrieval method
- [x] Closes #1295, #758
- [x] I am familiar with the [contributing guidelines](https://pvlib-python.readthedocs.io/en/latest/contributing.html)
- [x] Tests added
- [x] Updates entries to [`docs/sphinx/source/api.rst`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/api.rst) for API changes.
- [x] Adds description and name entries in the appropriate "what's new" file in [`docs/sphinx/source/whatsnew`](https://github.com/pvlib/pvlib-python/tree/master/docs/sphinx/source/whatsnew) for all changes. Includes link to the GitHub Issue with `` :issue:`num` `` or this Pull Request with `` :pull:`num` ``. Includes contributor name and/or GitHub username (link with `` :ghuser:`user` ``).
- [x] New code is fully documented. Includes [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) compliant docstrings, examples, and comments where necessary.
- [x] Pull request is nearly complete and ready for detailed review.
- [x] Maintainer: Appropriate GitHub Labels and Milestone are assigned to the Pull Request and linked Issue.
This PR adds a function to fetch a horizon profile from PVGIS.
~~In this request, I've added two main functions: one in a new module horizon.py that, given an SRTM DEM, calculates a horizon profile in terms of (azimuth, elevation) manually, and an addition in iotools that fetches the same from pvgis (#1295 ). I also incorporated a few functions from abandoned #758. I've tested that they're compatible, but have not verified the results.~~
~~Along with #1295, this gives pvlib 3 potential ways of retrieving the horizon profile. I'm going to be testing them against each other and collected field data like @mikofski suggested in #1295 . I'll add more functionality as I go along but I wanted to check in with @cwhanse and @jsstein with what we have now. I'm currently working on retrieving raw SRTM data automatically, as well.~~
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pvlib/iotools/pvgis.py]
(definition of get_pvgis_horizon:)
def get_pvgis_horizon(latitude, longitude, url=URL, **kwargs):
"""Get horizon data from PVGIS.
Parameters
----------
latitude : float
Latitude in degrees north
longitude : float
Longitude in degrees east
url: str, default: :const:`pvlib.iotools.pvgis.URL`
Base URL for PVGIS
kwargs:
Passed to requests.get
Returns
-------
data : pd.Series
Pandas Series of the retrived horizon elevation angles. Index is the
corresponding horizon azimuth angles.
metadata : dict
Metadata returned by PVGIS.
Notes
-----
The horizon azimuths are specified clockwise from north, e.g., south=180.
This is the standard pvlib convention, although the PVGIS website specifies
south=0.
References
----------
.. [1] `PVGIS horizon profile tool
<https://ec.europa.eu/jrc/en/PVGIS/tools/horizon>`_"""
[end of new definitions in pvlib/iotools/pvgis.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add retrieval function for horizon profile from MINES Paris Tech
<!-- Thank you for your contribution! The following items must be addressed before the code can be merged. Please don't hesitate to ask for help if you're unsure of how to accomplish any of the items. Feel free to remove checklist items that are not relevant to your change. -->
- [x] I am familiar with the [contributing guidelines](https://pvlib-python.readthedocs.io/en/latest/contributing.html)
- [x] Tests added
- [x] Updates entries to [`docs/sphinx/source/api.rst`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/api.rst) for API changes.
- [x] Adds description and name entries in the appropriate "what's new" file in [`docs/sphinx/source/whatsnew`](https://github.com/pvlib/pvlib-python/tree/master/docs/sphinx/source/whatsnew) for all changes. Includes link to the GitHub Issue with `` :issue:`num` `` or this Pull Request with `` :pull:`num` ``. Includes contributor name and/or GitHub username (link with `` :ghuser:`user` ``).
- [x] New code is fully documented. Includes [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) compliant docstrings, examples, and comments where necessary.
- [x] Pull request is nearly complete and ready for detailed review.
- [x] Maintainer: Appropriate GitHub Labels and Milestone are assigned to the Pull Request and linked Issue.
<!-- Brief description of the problem and proposed solution (if not already fully described in the issue linked to above): -->
The proposed function retrieves the local horizon profile for a specific location (latitude, longitude, and elevation). The returned horizon profile has a resolution of 1 degree in the azimuth direction. The service is provided by MINES ParisTech though I cannot find any official documentation for it.
The function added in this PR (``pvlib.iotools.get_mines_horizon``) is very similar to the function added in #1395 (``pvlib.iotools.get_pvgis_horizon``).
----------
@mikofski @cwhanse I saw your discussions in #758 and #1290 and figured I'd share the code I had laying around for downloading the local horizon profile from SRTM. Does this have any interest to you?
I'm lovin' this! Could we also look into retrieving pvgis horizon data, how do they compare to the SRTM from MINES?
--------------------
</issues> | 311781d2380997044da0e484dc90aa146a74ca44 |
conan-io__conan-10437 | 10,437 | conan-io/conan | null | 9c34ea7568ec8b7eb36a3fbc0fd2ec8a30716830 | 2022-01-26T17:39:46Z | diff --git a/conan/tools/microsoft/__init__.py b/conan/tools/microsoft/__init__.py
index eb506393f75..e86a8b19dd3 100644
--- a/conan/tools/microsoft/__init__.py
+++ b/conan/tools/microsoft/__init__.py
@@ -1,6 +1,6 @@
from conan.tools.microsoft.toolchain import MSBuildToolchain
from conan.tools.microsoft.msbuild import MSBuild
from conan.tools.microsoft.msbuilddeps import MSBuildDeps
-from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc
+from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc, is_msvc_static_runtime
from conan.tools.microsoft.subsystems import subsystem_path
from conan.tools.microsoft.layout import vs_layout
diff --git a/conan/tools/microsoft/visual.py b/conan/tools/microsoft/visual.py
index 9dbdc5986fa..fefb5df6590 100644
--- a/conan/tools/microsoft/visual.py
+++ b/conan/tools/microsoft/visual.py
@@ -181,3 +181,11 @@ def is_msvc(conanfile):
"""
settings = conanfile.settings
return settings.get_safe("compiler") in ["Visual Studio", "msvc"]
+
+
+def is_msvc_static_runtime(conanfile):
+ """ Validate when building with Visual Studio or msvc and MT on runtime
+ :param conanfile: ConanFile instance
+ :return: True, if msvc + runtime MT. Otherwise, False
+ """
+ return is_msvc(conanfile) and "MT" in msvc_runtime_flag(conanfile)
| diff --git a/conans/test/unittests/tools/microsoft/test_msbuild.py b/conans/test/unittests/tools/microsoft/test_msbuild.py
index f6aab1c5dff..14bfbb02a47 100644
--- a/conans/test/unittests/tools/microsoft/test_msbuild.py
+++ b/conans/test/unittests/tools/microsoft/test_msbuild.py
@@ -4,10 +4,10 @@
import pytest
from mock import Mock
-from conan.tools.microsoft import MSBuild, MSBuildToolchain, is_msvc
+from conan.tools.microsoft import MSBuild, MSBuildToolchain, is_msvc, is_msvc_static_runtime
from conans.model.conf import ConfDefinition, Conf
from conans.model.env_info import EnvValues
-from conans.test.utils.mocks import ConanFileMock, MockSettings
+from conans.test.utils.mocks import ConanFileMock, MockSettings, MockOptions, MockConanfile
from conans.test.utils.test_files import temp_folder
from conans.tools import load
from conans import ConanFile, Settings
@@ -189,3 +189,23 @@ def test_is_msvc(compiler, expected):
conanfile.initialize(settings, EnvValues())
conanfile.settings.compiler = compiler
assert is_msvc(conanfile) == expected
+
+
+@pytest.mark.parametrize("compiler,shared,runtime,build_type,expected", [
+ ("Visual Studio", True, "MT", "Release", True),
+ ("msvc", True, "static", "Release", True),
+ ("Visual Studio", False, "MT", "Release", True),
+ ("Visual Studio", True, "MD", "Release", False),
+ ("msvc", True, "static", "Debug", True),
+ ("clang", True, None, "Debug", False),
+])
+def test_is_msvc_static_runtime(compiler, shared, runtime, build_type, expected):
+ options = MockOptions({"shared": shared})
+ settings = MockSettings({"build_type": build_type,
+ "arch": "x86_64",
+ "compiler": compiler,
+ "compiler.runtime": runtime,
+ "compiler.version": "17",
+ "cppstd": "17"})
+ conanfile = MockConanfile(settings, options)
+ assert is_msvc_static_runtime(conanfile) == expected
| [
{
"components": [
{
"doc": "Validate when building with Visual Studio or msvc and MT on runtime\n:param conanfile: ConanFile instance\n:return: True, if msvc + runtime MT. Otherwise, False",
"lines": [
186,
191
],
"name": "is_msvc_static_runtime",
... | [
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_cpu_count",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_toolset",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_toolset_for_intel_cc[icx-Intel",
"conans/test/unittests/tools/microsoft/test_msbu... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Identify when using static runtime
Changelog: Feature: Add `is_msvc_static_runtime` method to `conan.tools.microsoft.visual` to identify when using `msvc` with static runtime.
Docs: https://github.com/conan-io/docs/pull/2372
Related to discussion https://github.com/conan-io/conan/pull/10424#issuecomment-1021567713
The idea here helping static runtime identification and simplifying conditions in recipes, where can result in a prone error. Example:
```python
def validate(self):
if is_msvc_static_runtime(self) and self.options.shared:
raise ConanInvalidConfiguration("Shared option is not well supported with static runtime on Windows")
```
- [ ] Refer to the issue that supports this Pull Request.
- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.
- [x] I've read the [Contributing guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
- [x] I've followed the PEP8 style guides for Python code.
- [x] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one.
<sup>**Note:** By default this PR will skip the slower tests and will use a limited set of python versions. Check [here](https://github.com/conan-io/conan/blob/develop/.github/PR_INCREASE_TESTING.md) how to increase the testing level by writing some tags in the current PR body text.</sup>
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/microsoft/visual.py]
(definition of is_msvc_static_runtime:)
def is_msvc_static_runtime(conanfile):
"""Validate when building with Visual Studio or msvc and MT on runtime
:param conanfile: ConanFile instance
:return: True, if msvc + runtime MT. Otherwise, False"""
[end of new definitions in conan/tools/microsoft/visual.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
boto__botocore-2599 | 2,599 | boto/botocore | null | d9d5ade65eeaf01178f6a3f74865ace07164f0f3 | 2022-01-25T20:57:09Z | diff --git a/.changes/next-release/enhancement-Requestheaders-37145.json b/.changes/next-release/enhancement-Requestheaders-37145.json
new file mode 100644
index 0000000000..82bd6bde65
--- /dev/null
+++ b/.changes/next-release/enhancement-Requestheaders-37145.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "Request headers",
+ "description": "Adding request headers with retry information."
+}
diff --git a/botocore/endpoint.py b/botocore/endpoint.py
index f209170fd6..06bb458466 100644
--- a/botocore/endpoint.py
+++ b/botocore/endpoint.py
@@ -12,10 +12,12 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import datetime
import os
import logging
import time
import threading
+import uuid
from botocore.vendored import six
@@ -129,15 +131,60 @@ def prepare_request(self, request):
self._encode_headers(request.headers)
return request.prepare()
+ def _calculate_ttl(self, response_received_timestamp, date_header,
+ read_timeout):
+ local_timestamp = datetime.datetime.utcnow()
+ date_conversion = datetime.datetime.strptime(
+ date_header,
+ "%a, %d %b %Y %H:%M:%S %Z"
+ )
+ estimated_skew = date_conversion - response_received_timestamp
+ ttl = local_timestamp + datetime.timedelta(
+ seconds=read_timeout) + estimated_skew
+ return ttl.strftime('%Y%m%dT%H%M%SZ')
+
+ def _set_ttl(self, retries_context, read_timeout, success_response):
+ response_date_header = success_response[0].headers.get('Date')
+ has_streaming_input = retries_context.get('has_streaming_input')
+ if response_date_header and not has_streaming_input:
+ try:
+ response_received_timestamp = datetime.datetime.utcnow()
+ retries_context['ttl'] = self._calculate_ttl(
+ response_received_timestamp,
+ response_date_header,
+ read_timeout
+ )
+ except Exception:
+ logger.debug(
+ "Exception received when updating retries context with TTL",
+ exc_info=True
+ )
+
+ def _update_retries_context(
+ self, context, attempt, success_response=None
+ ):
+ retries_context = context.setdefault('retries', {})
+ retries_context['attempt'] = attempt
+ if 'invocation-id' not in retries_context:
+ retries_context['invocation-id'] = str(uuid.uuid4())
+
+ if success_response:
+ read_timeout = context['client_config'].read_timeout
+ self._set_ttl(retries_context, read_timeout, success_response)
+
def _send_request(self, request_dict, operation_model):
attempts = 1
- request = self.create_request(request_dict, operation_model)
context = request_dict['context']
+ self._update_retries_context(context, attempts)
+ request = self.create_request(request_dict, operation_model)
success_response, exception = self._get_response(
request, operation_model, context)
while self._needs_retry(attempts, operation_model, request_dict,
success_response, exception):
attempts += 1
+ self._update_retries_context(
+ context, attempts, success_response
+ )
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
diff --git a/botocore/handlers.py b/botocore/handlers.py
index 17b1df3a30..65bd6a0b20 100644
--- a/botocore/handlers.py
+++ b/botocore/handlers.py
@@ -943,6 +943,21 @@ def remove_lex_v2_start_conversation(class_attributes, **kwargs):
del class_attributes['start_conversation']
+def add_retry_headers(request, **kwargs):
+ retries_context = request.context.get('retries')
+ if not retries_context:
+ return
+ headers = request.headers
+ headers['amz-sdk-invocation-id'] = retries_context['invocation-id']
+ sdk_retry_keys = ('ttl', 'attempt', 'max')
+ sdk_request_headers = [
+ f'{key}={retries_context[key]}'
+ for key in sdk_retry_keys
+ if key in retries_context
+ ]
+ headers['amz-sdk-request'] = '; '.join(sdk_request_headers)
+
+
# This is a list of (event_name, handler).
# When a Session is created, everything in this list will be
# automatically registered with that Session.
@@ -996,6 +1011,7 @@ def remove_lex_v2_start_conversation(class_attributes, **kwargs):
('before-call.glacier.UploadArchive', add_glacier_checksums),
('before-call.glacier.UploadMultipartPart', add_glacier_checksums),
('before-call.ec2.CopySnapshot', inject_presigned_url_ec2),
+ ('request-created', add_retry_headers),
('request-created.machinelearning.Predict', switch_host_machinelearning),
('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST),
('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST),
diff --git a/botocore/retries/standard.py b/botocore/retries/standard.py
index c522d06bc0..de4b73e44e 100644
--- a/botocore/retries/standard.py
+++ b/botocore/retries/standard.py
@@ -266,6 +266,11 @@ def __init__(self, max_attempts):
def is_retryable(self, context):
under_max_attempts = context.attempt_number < self._max_attempts
+ retries_context = context.request_context['retries']
+ retries_max_attempts = retries_context.setdefault(
+ 'max', self._max_attempts)
+ if self._max_attempts > retries_max_attempts:
+ retries_context['max'] = self._max_attempts
if not under_max_attempts:
logger.debug("Max attempts of %s reached.", self._max_attempts)
context.add_retry_metadata(MaxAttemptsReached=True)
diff --git a/botocore/retryhandler.py b/botocore/retryhandler.py
index cef29a7999..76a7027d05 100644
--- a/botocore/retryhandler.py
+++ b/botocore/retryhandler.py
@@ -180,7 +180,16 @@ def __call__(self, attempts, response, caught_exception, **kwargs):
this will process retries appropriately.
"""
- if self._checker(attempts, response, caught_exception):
+ checker_kwargs = {
+ 'attempt_number': attempts,
+ 'response': response,
+ 'caught_exception': caught_exception
+ }
+ if isinstance(self._checker, MaxAttemptsDecorator):
+ retries_context = kwargs['request_dict']['context']['retries']
+ checker_kwargs.update({'retries_context': retries_context})
+
+ if self._checker(**checker_kwargs):
result = self._action(attempts=attempts)
logger.debug("Retry needed, action of: %s", result)
return result
@@ -246,7 +255,13 @@ def __init__(self, checker, max_attempts, retryable_exceptions=None):
self._max_attempts = max_attempts
self._retryable_exceptions = retryable_exceptions
- def __call__(self, attempt_number, response, caught_exception):
+ def __call__(self, attempt_number, response, caught_exception,
+ retries_context):
+ retries_max_attempts = retries_context.setdefault(
+ 'max', self._max_attempts)
+ if self._max_attempts > retries_max_attempts:
+ retries_context['max'] = self._max_attempts
+
should_retry = self._should_retry(attempt_number, response,
caught_exception)
if should_retry:
| diff --git a/tests/functional/test_retry.py b/tests/functional/test_retry.py
index c5d59bb334..67ba56ba81 100644
--- a/tests/functional/test_retry.py
+++ b/tests/functional/test_retry.py
@@ -10,6 +10,8 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import botocore.endpoint
+import datetime
import contextlib
import json
from tests import BaseSessionTest, mock, ClientHTTPStubber
@@ -17,6 +19,8 @@
from botocore.exceptions import ClientError
from botocore.config import Config
+RETRY_MODES = ('legacy', 'standard', 'adaptive')
+
class BaseRetryTest(BaseSessionTest):
def setUp(self):
@@ -44,6 +48,144 @@ def assert_will_retry_n_times(self, client, num_retries,
self.assertEqual(len(http_stubber.requests), num_responses)
+class TestRetryHeader(BaseRetryTest):
+
+ def _retry_headers_test_cases(self):
+ responses = [
+ [
+ (500, {'Date': 'Sat, 01 Jun 2019 00:00:00 GMT'}),
+ (500, {'Date': 'Sat, 01 Jun 2019 00:00:01 GMT'}),
+ (200, {'Date': 'Sat, 01 Jun 2019 00:00:02 GMT'})
+ ],
+ [
+ (500, {'Date': 'Sat, 01 Jun 2019 00:10:03 GMT'}),
+ (500, {'Date': 'Sat, 01 Jun 2019 00:10:09 GMT'}),
+ (200, {'Date': 'Sat, 01 Jun 2019 00:10:15 GMT'})
+ ]
+ ]
+
+ # The first, third and seventh datetime values of each
+ # utcnow_side_effects list are side_effect values for when
+ # utcnow is called in SigV4 signing.
+ utcnow_side_effects = [
+ [
+ datetime.datetime(2019, 6, 1, 0, 0, 0, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 0, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 1, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 0, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 1, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 2, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 0, 0),
+ ],
+ [
+ datetime.datetime(2020, 6, 1, 0, 0, 0, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 5, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 6, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 0, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 11, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 12, 0),
+ datetime.datetime(2019, 6, 1, 0, 0, 0, 0),
+ ]
+ ]
+ expected_headers = [
+ [
+ b'attempt=1',
+ b'ttl=20190601T000011Z; attempt=2; max=3',
+ b'ttl=20190601T000012Z; attempt=3; max=3',
+ ],
+ [
+ b'attempt=1',
+ b'ttl=20190601T001014Z; attempt=2; max=3',
+ b'ttl=20190601T001020Z; attempt=3; max=3',
+ ]
+ ]
+ test_cases = list(
+ zip(responses,
+ utcnow_side_effects,
+ expected_headers))
+ return test_cases
+
+ def _test_amz_sdk_request_header_with_test_case(self, responses,
+ utcnow_side_effects,
+ expected_headers,
+ client_config):
+ datetime_patcher = mock.patch.object(
+ botocore.endpoint.datetime, 'datetime',
+ mock.Mock(wraps=datetime.datetime)
+ )
+ mocked_datetime = datetime_patcher.start()
+ mocked_datetime.utcnow.side_effect = utcnow_side_effects
+
+ client = self.session.create_client(
+ 'dynamodb', self.region, config=client_config)
+ with ClientHTTPStubber(client) as http_stubber:
+ for response in responses:
+ http_stubber.add_response(
+ headers=response[1],
+ status=response[0], body=b'{}')
+ client.list_tables()
+ amz_sdk_request_headers = [
+ request.headers['amz-sdk-request']
+ for request in http_stubber.requests
+ ]
+ self.assertListEqual(amz_sdk_request_headers, expected_headers)
+ datetime_patcher.stop()
+
+ def test_amz_sdk_request_header(self):
+ test_cases = self._retry_headers_test_cases()
+ for retry_mode in RETRY_MODES:
+ retries_config = {'mode': retry_mode, 'total_max_attempts': 3}
+ client_config = Config(read_timeout=10, retries=retries_config)
+ for test_case in test_cases:
+ self._test_amz_sdk_request_header_with_test_case(
+ *test_case, client_config=client_config)
+
+ def test_amz_sdk_invocation_id_header_persists(self):
+ for retry_mode in RETRY_MODES:
+ client_config = Config(retries={'mode': retry_mode})
+ client = self.session.create_client(
+ 'dynamodb', self.region, config=client_config)
+ num_retries = 2
+ with ClientHTTPStubber(client) as http_stubber:
+ for _ in range(num_retries):
+ http_stubber.add_response(status=500)
+ http_stubber.add_response(status=200)
+ client.list_tables()
+ amz_sdk_invocation_id_headers = [
+ request.headers['amz-sdk-invocation-id']
+ for request in http_stubber.requests
+ ]
+ self.assertEqual(
+ amz_sdk_invocation_id_headers[0],
+ amz_sdk_invocation_id_headers[1])
+ self.assertEqual(
+ amz_sdk_invocation_id_headers[1],
+ amz_sdk_invocation_id_headers[2])
+
+ def test_amz_sdk_invocation_id_header_unique_per_invocation(self):
+ client = self.session.create_client(
+ 'dynamodb', self.region)
+ num_of_invocations = 2
+ with ClientHTTPStubber(client) as http_stubber:
+ for _ in range(num_of_invocations):
+ http_stubber.add_response(status=500)
+ http_stubber.add_response(status=200)
+ client.list_tables()
+ amz_sdk_invocation_id_headers = [
+ request.headers['amz-sdk-invocation-id']
+ for request in http_stubber.requests
+ ]
+ self.assertEqual(
+ amz_sdk_invocation_id_headers[0],
+ amz_sdk_invocation_id_headers[1])
+ self.assertEqual(
+ amz_sdk_invocation_id_headers[2],
+ amz_sdk_invocation_id_headers[3])
+ self.assertNotEqual(
+ amz_sdk_invocation_id_headers[0],
+ amz_sdk_invocation_id_headers[2])
+
+
class TestLegacyRetry(BaseRetryTest):
def test_can_override_max_attempts(self):
client = self.session.create_client(
diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py
index 0a8016ba01..8999a580e0 100644
--- a/tests/unit/retries/test_standard.py
+++ b/tests/unit/retries/test_standard.py
@@ -217,6 +217,7 @@ def _verify_retryable(checker, operation_model,
parsed_response=parsed_response,
http_response=http_response,
caught_exception=caught_exception,
+ request_context={'retries': {}}
)
assert checker.is_retryable(context) == is_retryable
@@ -231,6 +232,7 @@ def arbitrary_retry_context():
http_response=AWSResponse(status_code=500,
raw=None, headers={}, url='https://foo'),
caught_exception=None,
+ request_context={'retries': {}}
)
diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py
index 8469fd903d..fe716d9849 100644
--- a/tests/unit/test_endpoint.py
+++ b/tests/unit/test_endpoint.py
@@ -11,13 +11,16 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import socket
+import datetime
import pytest
+import botocore.endpoint
from tests import mock
from tests import unittest
from botocore.compat import six
+from botocore.config import Config
from botocore.endpoint import Endpoint, DEFAULT_TIMEOUT
from botocore.endpoint import EndpointCreator
from botocore.exceptions import HTTPClientError
@@ -124,6 +127,29 @@ def test_make_request_with_context(self):
request = prepare.call_args[0][0]
self.assertEqual(request.context['signing']['region'], 'us-west-2')
+ def test_make_request_sets_retries_config_in_context(self):
+ r = request_dict()
+ r['context'] = {'signing': {'region': 'us-west-2'}}
+ with mock.patch('botocore.endpoint.Endpoint.prepare_request') as prepare:
+ self.endpoint.make_request(self.op, r)
+ request = prepare.call_args[0][0]
+ self.assertIn('retries', request.context)
+
+ def test_exception_caught_when_constructing_retries_context(self):
+ r = request_dict()
+ datetime_patcher = mock.patch.object(
+ botocore.endpoint.datetime, 'datetime',
+ mock.Mock(wraps=datetime.datetime)
+ )
+ r['context'] = {'signing': {'region': 'us-west-2'}}
+ with mock.patch('botocore.endpoint.Endpoint.prepare_request') as prepare:
+ mocked_datetime = datetime_patcher.start()
+ mocked_datetime.side_effect = Exception()
+ self.endpoint.make_request(self.op, r)
+ datetime_patcher.stop()
+ request = prepare.call_args[0][0]
+ self.assertIn('retries', request.context)
+
def test_parses_modeled_exception_fields(self):
# Setup the service model to have exceptions to generate the mapping
self.service_model = mock.Mock(spec=ServiceModel)
@@ -188,7 +214,9 @@ def test_retry_events_are_emitted(self):
def test_retry_events_can_alter_behavior(self):
self.event_emitter.emit.side_effect = self.get_emitter_responses(
num_retries=1)
- self.endpoint.make_request(self._operation, request_dict())
+ r = request_dict()
+ r['context']['client_config'] = Config()
+ self.endpoint.make_request(self._operation, r)
self.assert_events_emitted(
self.event_emitter,
expected_events=[
@@ -221,7 +249,9 @@ def test_retry_attempts_added_to_response_metadata(self):
parser = mock.Mock()
parser.parse.return_value = {'ResponseMetadata': {}}
self.factory.return_value.create_parser.return_value = parser
- response = self.endpoint.make_request(self._operation, request_dict())
+ r = request_dict()
+ r['context']['client_config'] = Config()
+ response = self.endpoint.make_request(self._operation, r)
self.assertEqual(response[1]['ResponseMetadata']['RetryAttempts'], 1)
def test_retry_attempts_is_zero_when_not_retried(self):
@@ -257,6 +287,7 @@ def test_reset_stream_on_retry(self):
op.metadata = {'protocol': 'rest-xml'}
request = request_dict()
request['body'] = body
+ request['context']['client_config'] = Config()
self.event_emitter.emit.side_effect = self.get_emitter_responses(
num_retries=2
)
diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py
index 6d0eb6b41e..527434229d 100644
--- a/tests/unit/test_handlers.py
+++ b/tests/unit/test_handlers.py
@@ -1023,7 +1023,7 @@ def test_s3_special_case_is_before_other_retry(self):
operation = service_model.operation_model('CopyObject')
responses = client.meta.events.emit(
'needs-retry.s3.CopyObject',
- request_dict={},
+ request_dict={'context': {'retries': {}}},
response=(mock.Mock(), mock.Mock()), endpoint=mock.Mock(),
operation=operation, attempts=1, caught_exception=None)
# This is implementation specific, but we're trying to verify that
diff --git a/tests/unit/test_retryhandler.py b/tests/unit/test_retryhandler.py
index 20d5d1dcdb..76453d1bf6 100644
--- a/tests/unit/test_retryhandler.py
+++ b/tests/unit/test_retryhandler.py
@@ -30,19 +30,39 @@
HTTP_200_RESPONSE = mock.Mock()
HTTP_200_RESPONSE.status_code = 200
+REQUEST_DICT = {
+ 'context': {
+ 'retries': {}
+ }
+}
+
class TestRetryCheckers(unittest.TestCase):
+ def construct_checker_kwargs(self, response, attempt_number,
+ caught_exception):
+ checker_kwargs = {
+ 'attempt_number': attempt_number,
+ 'response': response,
+ 'caught_exception': caught_exception
+ }
+ if isinstance(self.checker, retryhandler.MaxAttemptsDecorator):
+ checker_kwargs.update({'retries_context': REQUEST_DICT['context']})
+
+ return checker_kwargs
+
def assert_should_be_retried(self, response, attempt_number=1,
caught_exception=None):
- self.assertTrue(self.checker(
- response=response, attempt_number=attempt_number,
- caught_exception=caught_exception))
+ checker_kwargs = self.construct_checker_kwargs(
+ response, attempt_number, caught_exception)
+
+ self.assertTrue(self.checker(**checker_kwargs))
def assert_should_not_be_retried(self, response, attempt_number=1,
caught_exception=None):
- self.assertFalse(self.checker(
- response=response, attempt_number=attempt_number,
- caught_exception=caught_exception))
+ checker_kwargs = self.construct_checker_kwargs(
+ response, attempt_number, caught_exception)
+
+ self.assertFalse(self.checker(**checker_kwargs))
def test_status_code_checker(self):
self.checker = retryhandler.HTTPStatusCodeChecker(500)
@@ -203,16 +223,17 @@ def test_create_retry_handler_with_socket_errors(self):
exception = EndpointConnectionError(endpoint_url='')
with self.assertRaises(EndpointConnectionError):
handler(response=None, attempts=10,
- caught_exception=exception)
+ caught_exception=exception, request_dict=REQUEST_DICT)
# No connection error raised because attempts < max_attempts.
sleep_time = handler(response=None, attempts=1,
- caught_exception=exception)
+ caught_exception=exception, request_dict=REQUEST_DICT)
self.assertEqual(sleep_time, 1)
# But any other exception should be raised even if
# attempts < max_attempts.
with self.assertRaises(ValueError):
sleep_time = handler(
- response=None, attempts=1, caught_exception=ValueError()
+ response=None, attempts=1, caught_exception=ValueError(),
+ request_dict=REQUEST_DICT
)
def test_connection_timeouts_are_retried(self):
@@ -220,8 +241,10 @@ def test_connection_timeouts_are_retried(self):
# from requests. We should be retrying those.
handler = retryhandler.create_retry_handler(
self.retry_config, operation_name='OperationBar')
- sleep_time = handler(response=None, attempts=1,
- caught_exception=ReadTimeoutError(endpoint_url=''))
+ sleep_time = handler(
+ response=None, attempts=1,
+ caught_exception=ReadTimeoutError(endpoint_url=''),
+ request_dict=REQUEST_DICT)
self.assertEqual(sleep_time, 1)
def test_create_retry_handler_with_no_operation(self):
@@ -244,10 +267,11 @@ def test_crc32_check_propogates_error(self):
http_response.content = b'foo'
# The first 10 attempts we get a retry.
self.assertEqual(handler(response=(http_response, {}), attempts=1,
- caught_exception=None), 1)
+ caught_exception=None,
+ request_dict=REQUEST_DICT), 1)
with self.assertRaises(ChecksumError):
handler(response=(http_response, {}), attempts=10,
- caught_exception=None)
+ caught_exception=None, request_dict=REQUEST_DICT)
class TestRetryHandler(unittest.TestCase):
| diff --git a/.changes/next-release/enhancement-Requestheaders-37145.json b/.changes/next-release/enhancement-Requestheaders-37145.json
new file mode 100644
index 0000000000..82bd6bde65
--- /dev/null
+++ b/.changes/next-release/enhancement-Requestheaders-37145.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "Request headers",
+ "description": "Adding request headers with retry information."
+}
| [
{
"components": [
{
"doc": "",
"lines": [
134,
144
],
"name": "Endpoint._calculate_ttl",
"signature": "def _calculate_ttl(self, response_received_timestamp, date_header, read_timeout):",
"type": "function"
},
{
"doc": ... | [
"tests/functional/test_retry.py::TestRetryHeader::test_amz_sdk_invocation_id_header_persists",
"tests/functional/test_retry.py::TestRetryHeader::test_amz_sdk_invocation_id_header_unique_per_invocation",
"tests/functional/test_retry.py::TestRetryHeader::test_amz_sdk_request_header",
"tests/unit/test_endpoint.p... | [
"tests/functional/test_retry.py::TestLegacyRetry::test_can_clobber_max_attempts_on_session",
"tests/functional/test_retry.py::TestLegacyRetry::test_can_override_max_attempts",
"tests/functional/test_retry.py::TestLegacyRetry::test_do_not_attempt_retries",
"tests/functional/test_retry.py::TestLegacyRetry::test... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Adding retry information headers
Adding the following request headers:
- `amz-invocation-id`: A uuid unique to each SDK request invocation.
- `amz-sdk-request`: Includes the following retry information for each SDK request: TTL, request attempt number, max number of retry attempts
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in botocore/endpoint.py]
(definition of Endpoint._calculate_ttl:)
def _calculate_ttl(self, response_received_timestamp, date_header, read_timeout):
(definition of Endpoint._set_ttl:)
def _set_ttl(self, retries_context, read_timeout, success_response):
(definition of Endpoint._update_retries_context:)
def _update_retries_context( self, context, attempt, success_response=None ):
[end of new definitions in botocore/endpoint.py]
[start of new definitions in botocore/handlers.py]
(definition of add_retry_headers:)
def add_retry_headers(request, **kwargs):
[end of new definitions in botocore/handlers.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5e4b564dd0f9aab16a404251ebd3e675c9681492 | |
pvlib__pvlib-python-1391 | 1,391 | pvlib/pvlib-python | 0.8 | d5d1d66aae4913f0e23b9a79c655efa1bdafe5f4 | 2022-01-25T02:44:13Z | diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index a9138ca588..f716995416 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,7 +3,7 @@
- [ ] Closes #xxxx
- [ ] I am familiar with the [contributing guidelines](https://pvlib-python.readthedocs.io/en/latest/contributing.html)
- [ ] Tests added
- - [ ] Updates entries to [`docs/sphinx/source/api.rst`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/api.rst) for API changes.
+ - [ ] Updates entries in [`docs/sphinx/source/reference`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/reference) for API changes.
- [ ] Adds description and name entries in the appropriate "what's new" file in [`docs/sphinx/source/whatsnew`](https://github.com/pvlib/pvlib-python/tree/master/docs/sphinx/source/whatsnew) for all changes. Includes link to the GitHub Issue with `` :issue:`num` `` or this Pull Request with `` :pull:`num` ``. Includes contributor name and/or GitHub username (link with `` :ghuser:`user` ``).
- [ ] New code is fully documented. Includes [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) compliant docstrings, examples, and comments where necessary.
- [ ] Pull request is nearly complete and ready for detailed review.
diff --git a/docs/sphinx/source/reference/pv_modeling.rst b/docs/sphinx/source/reference/pv_modeling.rst
index dc2f8f1099..d1ae5a6559 100644
--- a/docs/sphinx/source/reference/pv_modeling.rst
+++ b/docs/sphinx/source/reference/pv_modeling.rst
@@ -43,6 +43,7 @@ PV temperature models
temperature.fuentes
temperature.ross
temperature.noct_sam
+ temperature.prilliman
pvsystem.PVSystem.get_cell_temperature
Temperature Model Parameters
diff --git a/docs/sphinx/source/whatsnew/v0.9.1.rst b/docs/sphinx/source/whatsnew/v0.9.1.rst
index 4ddcb9bd51..c23f50496c 100644
--- a/docs/sphinx/source/whatsnew/v0.9.1.rst
+++ b/docs/sphinx/source/whatsnew/v0.9.1.rst
@@ -11,6 +11,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+* Added :py:func:`pvlib.temperature.prilliman` for modeling cell temperature
+ at short time steps (:issue:`1081`, :pull:`1391`)
Bug fixes
~~~~~~~~~
diff --git a/pvlib/clearsky.py b/pvlib/clearsky.py
index d54f57767b..9f355669d0 100644
--- a/pvlib/clearsky.py
+++ b/pvlib/clearsky.py
@@ -679,23 +679,6 @@ def _to_centered_series(vals, idx, samples_per_window):
return pd.Series(index=idx, data=vals).shift(shift)
-def _get_sample_intervals(times, win_length):
- """ Calculates time interval and samples per window for Reno-style clear
- sky detection functions
- """
- deltas = np.diff(times.values) / np.timedelta64(1, '60s')
-
- # determine if we can proceed
- if times.inferred_freq and len(np.unique(deltas)) == 1:
- sample_interval = times[1] - times[0]
- sample_interval = sample_interval.seconds / 60 # in minutes
- samples_per_window = int(win_length / sample_interval)
- return sample_interval, samples_per_window
- else:
- raise NotImplementedError('algorithm does not yet support unequal '
- 'times. consider resampling your data.')
-
-
def _clear_sample_index(clear_windows, samples_per_window, align, H):
"""
Returns indices of clear samples in clear windows
@@ -849,8 +832,8 @@ def detect_clearsky(measured, clearsky, times=None, window_length=10,
else:
clear = clearsky
- sample_interval, samples_per_window = _get_sample_intervals(times,
- window_length)
+ sample_interval, samples_per_window = \
+ tools._get_sample_intervals(times, window_length)
# generate matrix of integers for creating windows with indexing
H = hankel(np.arange(samples_per_window),
diff --git a/pvlib/temperature.py b/pvlib/temperature.py
index 422270b8d6..eb960701bd 100644
--- a/pvlib/temperature.py
+++ b/pvlib/temperature.py
@@ -7,6 +7,10 @@
import pandas as pd
from pvlib.tools import sind
from pvlib._deprecation import warn_deprecated
+from pvlib.tools import _get_sample_intervals
+import scipy
+import warnings
+
TEMPERATURE_MODEL_PARAMETERS = {
'sapm': {
@@ -821,3 +825,155 @@ def noct_sam(poa_global, temp_air, wind_speed, noct, module_efficiency,
heat_loss = 1 - module_efficiency / tau_alpha
wind_loss = 9.5 / (5.7 + 3.8 * wind_adj)
return temp_air + cell_temp_init * heat_loss * wind_loss
+
+
+def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
+ """
+ Smooth short-term cell temperature transients using the Prilliman model.
+
+ The Prilliman et al. model [1]_ applies a weighted moving average to
+ the output of a steady-state cell temperature model to account for
+ a module's thermal inertia by smoothing the cell temperature's
+ response to changing weather conditions.
+
+ .. warning::
+ This implementation requires the time series inputs to be regularly
+ sampled in time with frequency less than 20 minutes. Data with
+ irregular time steps should be resampled prior to using this function.
+
+ Parameters
+ ----------
+ temp_cell : pandas.Series with DatetimeIndex
+ Cell temperature modeled with steady-state assumptions. [C]
+
+ wind_speed : pandas.Series
+ Wind speed, adjusted to correspond to array height [m/s]
+
+ unit_mass : float, default 11.1
+ Total mass of module divided by its one-sided surface area [kg/m^2]
+
+ coefficients : 4-element list-like, optional
+ Values for coefficients a_0 through a_3, see Eq. 9 of [1]_
+
+ Returns
+ -------
+ temp_cell : pandas.Series
+ Smoothed version of the input cell temperature. Input temperature
+ with sampling interval >= 20 minutes is returned unchanged. [C]
+
+ Notes
+ -----
+ This smoothing model was developed and validated using the SAPM
+ cell temperature model for the steady-state input.
+
+ Smoothing is done using the 20 minute window behind each temperature
+ value. At the beginning of the series where a full 20 minute window is not
+ possible, partial windows are used instead.
+
+ Output ``temp_cell[k]`` is NaN when input ``wind_speed[k]`` is NaN, or
+ when no non-NaN data are in the input temperature for the 20 minute window
+ preceding index ``k``.
+
+ References
+ ----------
+ .. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
+ "Transient Weighted Moving-Average Model of Photovoltaic Module
+ Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
+ :doi:`10.1109/JPHOTOV.2020.2992351`
+ """
+
+ # `sample_interval` in minutes:
+ sample_interval, samples_per_window = \
+ _get_sample_intervals(times=temp_cell.index, win_length=20)
+
+ if sample_interval >= 20:
+ warnings.warn("temperature.prilliman only applies smoothing when "
+ "the sampling interval is shorter than 20 minutes "
+ f"(input sampling interval: {sample_interval} minutes);"
+ " returning input temperature series unchanged")
+ # too coarsely sampled for smoothing to be relevant
+ return temp_cell
+
+ # handle cases where the time series is shorter than 20 minutes total
+ samples_per_window = min(samples_per_window, len(temp_cell))
+
+ # prefix with NaNs so that the rolling window is "full",
+ # even for the first actual value:
+ prefix = np.full(samples_per_window, np.nan)
+ temp_cell_prefixed = np.append(prefix, temp_cell.values)
+
+ # generate matrix of integers for creating windows with indexing
+ H = scipy.linalg.hankel(np.arange(samples_per_window),
+ np.arange(samples_per_window - 1,
+ len(temp_cell_prefixed) - 1))
+ # each row of `subsets` is the values in one window
+ subsets = temp_cell_prefixed[H].T
+
+ # `subsets` now looks like this (for 5-minute data, so 4 samples/window)
+ # where "1." is a stand-in for the actual temperature values
+ # [[nan, nan, nan, nan],
+ # [nan, nan, nan, 1.],
+ # [nan, nan, 1., 1.],
+ # [nan, 1., 1., 1.],
+ # [ 1., 1., 1., 1.],
+ # [ 1., 1., 1., 1.],
+ # [ 1., 1., 1., 1.],
+ # ...
+
+ # calculate weights for the values in each window
+ if coefficients is not None:
+ a = coefficients
+ else:
+ # values from [1], Table II
+ a = [0.0046, 0.00046, -0.00023, -1.6e-5]
+
+ wind_speed = wind_speed.values
+ p = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
+ # calculate the time lag for each sample in the window, paying attention
+ # to units (seconds for `timedeltas`, minutes for `sample_interval`)
+ timedeltas = np.arange(samples_per_window, 0, -1) * sample_interval * 60
+ weights = np.exp(-p[:, np.newaxis] * timedeltas)
+
+ # Set weights corresponding to the prefix values to zero; otherwise the
+ # denominator of the weighted average below would be wrong.
+ # Weights corresponding to (non-prefix) NaN values must be zero too
+ # for the same reason.
+
+ # Right now `weights` is something like this
+ # (using 5-minute inputs, so 4 samples per window -> 4 values per row):
+ # [[0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # ...
+
+ # After the next line, the NaNs in `subsets` will be zeros in `weights`,
+ # like this (with more zeros for any NaNs in the input temperature):
+
+ # [[0. , 0. , 0. , 0. ],
+ # [0. , 0. , 0. , 0.4972],
+ # [0. , 0. , 0.2472, 0.4972],
+ # [0. , 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # [0.0611, 0.1229, 0.2472, 0.4972],
+ # ...
+
+ weights[np.isnan(subsets)] = 0
+
+ # change the first row of weights from zero to nan -- this is a
+ # trick to prevent div by zero warning when dividing by summed weights
+ weights[0, :] = np.nan
+
+ # finally, take the weighted average of each window:
+ # use np.nansum for numerator to ignore nans in input temperature, but
+ # np.sum for denominator to propagate nans in input wind speed.
+ numerator = np.nansum(subsets * weights, axis=1)
+ denominator = np.sum(weights, axis=1)
+ smoothed = numerator / denominator
+ smoothed[0] = temp_cell.values[0]
+ smoothed = pd.Series(smoothed, index=temp_cell.index)
+ return smoothed
diff --git a/pvlib/tools.py b/pvlib/tools.py
index eef80a3b37..94bd042afe 100644
--- a/pvlib/tools.py
+++ b/pvlib/tools.py
@@ -344,3 +344,20 @@ def _golden_sect_DataFrame(params, VL, VH, func):
raise Exception("EXCEPTION:iterations exceeded maximum (50)")
return func(df, 'V1'), df['V1']
+
+
+def _get_sample_intervals(times, win_length):
+ """ Calculates time interval and samples per window for Reno-style clear
+ sky detection functions
+ """
+ deltas = np.diff(times.values) / np.timedelta64(1, '60s')
+
+ # determine if we can proceed
+ if times.inferred_freq and len(np.unique(deltas)) == 1:
+ sample_interval = times[1] - times[0]
+ sample_interval = sample_interval.seconds / 60 # in minutes
+ samples_per_window = int(win_length / sample_interval)
+ return sample_interval, samples_per_window
+ else:
+ raise NotImplementedError('algorithm does not yet support unequal '
+ 'times. consider resampling your data.')
| diff --git a/pvlib/tests/test_temperature.py b/pvlib/tests/test_temperature.py
index 40d0ec2d6f..5630f441e5 100644
--- a/pvlib/tests/test_temperature.py
+++ b/pvlib/tests/test_temperature.py
@@ -8,6 +8,8 @@
from pvlib import temperature, tools
from pvlib._deprecation import pvlibDeprecationWarning
+import re
+
@pytest.fixture
def sapm_default():
@@ -293,3 +295,71 @@ def test_noct_sam_options():
def test_noct_sam_errors():
with pytest.raises(ValueError):
temperature.noct_sam(1000., 25., 1., 34., 0.2, array_height=3)
+
+
+def test_prilliman():
+ # test against values calculated using pvl_MAmodel_2, see pvlib #1081
+ times = pd.date_range('2019-01-01', freq='5min', periods=8)
+ cell_temperature = pd.Series([0, 1, 3, 6, 10, 15, 21, 27], index=times)
+ wind_speed = pd.Series([0, 1, 2, 3, 2, 1, 2, 3])
+
+ # default coeffs
+ expected = pd.Series([0, 0, 0.7047457, 2.21176412, 4.45584299, 7.63635512,
+ 12.26808265, 18.00305776], index=times)
+ actual = temperature.prilliman(cell_temperature, wind_speed, unit_mass=10)
+ assert_series_equal(expected, actual)
+
+ # custom coeffs
+ coefficients = [0.0046, 4.5537e-4, -2.2586e-4, -1.5661e-5]
+ expected = pd.Series([0, 0, 0.70716941, 2.2199537, 4.47537694, 7.6676931,
+ 12.30423167, 18.04215198], index=times)
+ actual = temperature.prilliman(cell_temperature, wind_speed, unit_mass=10,
+ coefficients=coefficients)
+ assert_series_equal(expected, actual)
+
+ # even very short inputs < 20 minutes total still work
+ times = pd.date_range('2019-01-01', freq='1min', periods=8)
+ cell_temperature = pd.Series([0, 1, 3, 6, 10, 15, 21, 27], index=times)
+ wind_speed = pd.Series([0, 1, 2, 3, 2, 1, 2, 3])
+ expected = pd.Series([0, 0, 0.53557976, 1.49270094, 2.85940173,
+ 4.63914366, 7.09641845, 10.24899272], index=times)
+ actual = temperature.prilliman(cell_temperature, wind_speed, unit_mass=12)
+ assert_series_equal(expected, actual)
+
+
+def test_prilliman_coarse():
+ # if the input series time step is >= 20 min, input is returned unchanged,
+ # and a warning is emitted
+ times = pd.date_range('2019-01-01', freq='30min', periods=3)
+ cell_temperature = pd.Series([0, 1, 3], index=times)
+ wind_speed = pd.Series([0, 1, 2])
+ msg = re.escape("temperature.prilliman only applies smoothing when the "
+ "sampling interval is shorter than 20 minutes (input "
+ "sampling interval: 30.0 minutes); returning "
+ "input temperature series unchanged")
+ with pytest.warns(UserWarning, match=msg):
+ actual = temperature.prilliman(cell_temperature, wind_speed)
+ assert_series_equal(cell_temperature, actual)
+
+
+def test_prilliman_nans():
+ # nans in inputs are handled appropriately; nans in input tcell
+ # are ignored but nans in wind speed cause nan in output
+ times = pd.date_range('2019-01-01', freq='1min', periods=8)
+ cell_temperature = pd.Series([0, 1, 3, 6, 10, np.nan, 21, 27], index=times)
+ wind_speed = pd.Series([0, 1, 2, 3, 2, 1, np.nan, 3])
+ actual = temperature.prilliman(cell_temperature, wind_speed)
+ expected = pd.Series([True, True, True, True, True, True, False, True],
+ index=times)
+ assert_series_equal(actual.notnull(), expected)
+
+ # check that nan temperatures do not mess up the weighted average;
+ # the original implementation did not set weight=0 for nan values,
+ # so the numerator of the weighted average ignored nans but the
+ # denominator (total weight) still included the weight for the nan.
+ cell_temperature = pd.Series([1, 1, 1, 1, 1, np.nan, 1, 1], index=times)
+ wind_speed = pd.Series(1, index=times)
+ actual = temperature.prilliman(cell_temperature, wind_speed)
+ # original implementation would return some values < 1 here
+ expected = pd.Series(1., index=times)
+ assert_series_equal(actual, expected)
| diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index a9138ca588..f716995416 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -3,7 +3,7 @@
- [ ] Closes #xxxx
- [ ] I am familiar with the [contributing guidelines](https://pvlib-python.readthedocs.io/en/latest/contributing.html)
- [ ] Tests added
- - [ ] Updates entries to [`docs/sphinx/source/api.rst`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/api.rst) for API changes.
+ - [ ] Updates entries in [`docs/sphinx/source/reference`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/reference) for API changes.
- [ ] Adds description and name entries in the appropriate "what's new" file in [`docs/sphinx/source/whatsnew`](https://github.com/pvlib/pvlib-python/tree/master/docs/sphinx/source/whatsnew) for all changes. Includes link to the GitHub Issue with `` :issue:`num` `` or this Pull Request with `` :pull:`num` ``. Includes contributor name and/or GitHub username (link with `` :ghuser:`user` ``).
- [ ] New code is fully documented. Includes [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) compliant docstrings, examples, and comments where necessary.
- [ ] Pull request is nearly complete and ready for detailed review.
diff --git a/docs/sphinx/source/reference/pv_modeling.rst b/docs/sphinx/source/reference/pv_modeling.rst
index dc2f8f1099..d1ae5a6559 100644
--- a/docs/sphinx/source/reference/pv_modeling.rst
+++ b/docs/sphinx/source/reference/pv_modeling.rst
@@ -43,6 +43,7 @@ PV temperature models
temperature.fuentes
temperature.ross
temperature.noct_sam
+ temperature.prilliman
pvsystem.PVSystem.get_cell_temperature
Temperature Model Parameters
diff --git a/docs/sphinx/source/whatsnew/v0.9.1.rst b/docs/sphinx/source/whatsnew/v0.9.1.rst
index 4ddcb9bd51..c23f50496c 100644
--- a/docs/sphinx/source/whatsnew/v0.9.1.rst
+++ b/docs/sphinx/source/whatsnew/v0.9.1.rst
@@ -11,6 +11,8 @@ Deprecations
Enhancements
~~~~~~~~~~~~
+* Added :py:func:`pvlib.temperature.prilliman` for modeling cell temperature
+ at short time steps (:issue:`1081`, :pull:`1391`)
Bug fixes
~~~~~~~~~
| [
{
"components": [
{
"doc": "Smooth short-term cell temperature transients using the Prilliman model.\n\nThe Prilliman et al. model [1]_ applies a weighted moving average to\nthe output of a steady-state cell temperature model to account for\na module's thermal inertia by smoothing the cell tempera... | [
"pvlib/tests/test_temperature.py::test_prilliman",
"pvlib/tests/test_temperature.py::test_prilliman_coarse",
"pvlib/tests/test_temperature.py::test_prilliman_nans"
] | [
"pvlib/tests/test_temperature.py::test_sapm_cell",
"pvlib/tests/test_temperature.py::test_sapm_module",
"pvlib/tests/test_temperature.py::test_sapm_cell_from_module",
"pvlib/tests/test_temperature.py::test_sapm_ndarray",
"pvlib/tests/test_temperature.py::test_sapm_series",
"pvlib/tests/test_temperature.py... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add Prilliman et al transience model to pvlib.temperature
- [x] Closes #1081
- [x] I am familiar with the [contributing guidelines](https://pvlib-python.readthedocs.io/en/latest/contributing.html)
- [x] Tests added
- [x] Updates entries to [`docs/sphinx/source/api.rst`](https://github.com/pvlib/pvlib-python/blob/master/docs/sphinx/source/api.rst) for API changes.
- [x] Adds description and name entries in the appropriate "what's new" file in [`docs/sphinx/source/whatsnew`](https://github.com/pvlib/pvlib-python/tree/master/docs/sphinx/source/whatsnew) for all changes. Includes link to the GitHub Issue with `` :issue:`num` `` or this Pull Request with `` :pull:`num` ``. Includes contributor name and/or GitHub username (link with `` :ghuser:`user` ``).
- [x] New code is fully documented. Includes [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) compliant docstrings, examples, and comments where necessary.
- [x] Pull request is nearly complete and ready for detailed review.
- [x] Maintainer: Appropriate GitHub Labels and Milestone are assigned to the Pull Request and linked Issue.
This PR adds a cleaned-up version of `prilliman_v5` from this notebook: https://gist.github.com/kanderso-nrel/1d6da384d7af8afc24c230f1f144eb57
Also: I noticed that one of the links in the PR template is out of date so I updated that here too.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pvlib/temperature.py]
(definition of prilliman:)
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""Smooth short-term cell temperature transients using the Prilliman model.
The Prilliman et al. model [1]_ applies a weighted moving average to
the output of a steady-state cell temperature model to account for
a module's thermal inertia by smoothing the cell temperature's
response to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time with frequency less than 20 minutes. Data with
irregular time steps should be resampled prior to using this function.
Parameters
----------
temp_cell : pandas.Series with DatetimeIndex
Cell temperature modeled with steady-state assumptions. [C]
wind_speed : pandas.Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0 through a_3, see Eq. 9 of [1]_
Returns
-------
temp_cell : pandas.Series
Smoothed version of the input cell temperature. Input temperature
with sampling interval >= 20 minutes is returned unchanged. [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
cell temperature model for the steady-state input.
Smoothing is done using the 20 minute window behind each temperature
value. At the beginning of the series where a full 20 minute window is not
possible, partial windows are used instead.
Output ``temp_cell[k]`` is NaN when input ``wind_speed[k]`` is NaN, or
when no non-NaN data are in the input temperature for the 20 minute window
preceding index ``k``.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`"""
[end of new definitions in pvlib/temperature.py]
[start of new definitions in pvlib/tools.py]
(definition of _get_sample_intervals:)
def _get_sample_intervals(times, win_length):
"""Calculates time interval and samples per window for Reno-style clear
sky detection functions"""
[end of new definitions in pvlib/tools.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
implement transient moving-average thermal model
**New Feature**
Implement the [transient moving-average model proposed by Matt Prillman](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9095219)
**Describe the solution you'd like**
a new function `pvlib.temperature.prillman(times, ...)`
**Describe alternatives you've considered**
see also #1080
**Additional context**
see https://github.com/NREL/ssc/issues/261
----------
@mjprilliman FYI
I've taken a look at implementing this but am not satisfied with what I've come up with so far. Here are my notes in case anyone else is working on this.
##### Option 1: `cell_temp_ss.rolling('1200s').apply(_prilliman_average)`
My natural starting point was to use pandas to get the offset-based windows and apply a custom averaging function. The problem is that `tseries.rolling(...).apply(...)` is way too slow -- ~1 second computation time for three days of 1-minute data, even without a custom averaging function:
```python
In [50]: N=60*24*3 # three days of 1-min data
...: s = pd.Series(range(N), pd.date_range('2019-01-01', freq='1T', periods=N))
...: %time _=s.rolling('1200s').apply(np.mean)
...:
CPU times: user 1.08 s, sys: 2.41 ms, total: 1.08 s
Wall time: 1.02 s
```
It's also a little clumsy giving the averaging function access to the `P` time series. The best way I found involved lots of extra indexing and made it even slower.
##### Option 2: pandas EWM
pandas does provide exponential-weighted functions via `s.ewm`, but as far as I can tell it considers the entire history of the time series; there is no way to do a moving window (https://github.com/pandas-dev/pandas/issues/11288). That's maybe an acceptable error because the exponential weighting would minimize the contribution of points outside the window, but according to its docstring, the weight must be a float, i.e. it is a constant, which is not the case for the Prilliman et al model (Eq. 11; `P` depends on the wind speed time series). So I don't think the pandas EWM functions are flexible enough for this.
##### Option 3: constrain to regular time series
I think the inefficiency of `tseries.rolling(...).apply(...)` can be avoided if the input time series index is evenly spaced -- something like the Hankel matrix indexing approach of the pre-#1074 detect_clearsky. But it's a shame to require even spacing, especially when the paper specifically highlights the algorithm's ability to handle data with nonuniform sampling.
##### Option 4: accelerate it using numba, or maybe cython
Haven't tried it but I'm guessing this is the answer, in coordination with #1060. Like in #1037, I can't find the trick to get these transient models running efficiently with base pandas/numpy.
Hi Kevin,
I'd like to help with this implementation. I had previously written a standalone Python script that was running the model accurately but I will have to revisit to see how or if it can tie in with the existing framework. I am working on SAM features this week but I can start working on this next week (possibly late this week) if that works.
```
def pvl_MAmodel_2(Tmstamp, SS, WS, m_u, a=np.array([.0046,4.5537e-4,-2.2586e-4,-1.5661e-5])):
#Numpy array MA model
#convert datetime to datenum
Tmstamp['Datenum'] = Tmstamp[['Times']].apply(pd.to_numeric)/(1e9)
Tmstamp2 = np.array([Tmstamp.Datenum])
#Separate bilinear interpolation coefficients into 4 variables
a0 = a[0]
a1 = a[1]
a2 = a[2]
a3 = a[3]
#P = pd.DataFrame(a0+a1*WS+a2*m_u+a3*m_u*WS,dtype=float) # Power parameter for exponential weighting function
P = np.array(a0+a1*WS+a2*m_u+a3*m_u*WS)
#Initialize Results variable T_MA
#T_MA = pd.DataFrame(SS.values[0]) #Initialize Moving Average with
#T_MA = np.arange(len(Tmstamp))
#T_MA = np.full_like(T_MA,np.nan,dtype=np.double)
#T_MA = np.array(SS[0,0])
T_MA = np.array(SS[0])
#T_MA[0] = SS[0,0]
#Set constant window length
WindowLength = 20*60
#Initialize Back Counter
I_B = 0
cntr1 = 1
for i in np.linspace(1,len(Tmstamp)-1,len(Tmstamp)-1):
#Set cntr1 to iterative i to match Matlab code
I_F = cntr1 - 1 #Front indice always set one timestep behind current value
deltaT_I_F = Tmstamp2[0,cntr1] - Tmstamp2[0,I_F] #Time difference between current time and front indice (in case time series isn't uniform)
if deltaT_I_F > WindowLength:
#T_MA[cntr1] = SS[0,cntr1] #if the front indice is more than 20 minutes behind the current value, just use the steady-state approximation
#T_MA = np.append(T_MA,SS[0,cntr1])
T_MA = np.append(T_MA,SS[cntr1])
else:
while((Tmstamp2[0,cntr1] - Tmstamp2[0,I_B]) > WindowLength) & (I_B<I_F):
I_B = I_B+1
#If the front indice is within 20 minutes and the back indice is not going to bump into the front, bump up the back indice by one until it is within 20 minutes (window length)
TimeBack = (Tmstamp2[0,cntr1] - Tmstamp2[0,I_B:I_F+1]) #Calculate the time in seconds back from the current value for each value between the front and back indices
#TempsInWindow = SS[0,I_B:I_F+1] #find the steady-state temperature approximations for the times within the indices (include front indice)
TempsInWindow = SS[I_B:I_F+1]
#Weight = np.exp(-P[0,cntr1]*TimeBack.astype(float)) #Calculate the weight of each element in indice as function of power parameter P = fcn(WS, unit mass) and time back
Weight = np.exp(-P[cntr1]*TimeBack) #Calculate the weight of each element in indice as function of power parameter P = fcn(WS, unit mass) and time back
Rel_weight = Weight/np.sum(Weight) #Calculate weights relative to the total (weighted average)
TempsWeighted = (TempsInWindow)*Rel_weight;
Temp = np.sum(TempsWeighted);
#Temp = pd.DataFrame(TempsInWindow.dot(np.transpose(Rel_weight))) #Dot product to find the temperature prediction at the current timestep
#Temp = (TempsInWindow.dot(np.transpose(Rel_weight))) #Dot product to find the temperature prediction at the current timestep
#Temp = (np.transpose(TempsInWindow).dot(Rel_weight))
T_MA = np.append(T_MA,Temp) #append Temp to the ongoing T_MA dataframe (contains values for the whole year)
#T_MA[cntr1] = Temp
cntr1 += 1
return(T_MA)
```
For reference this is the Python function I was using in my grad studies, although most of the model development and validation was done in Matlab.
@kanderso-nrel I didn't realize that `rolling.apply()` was so slow (compared to built-in methods). I'll take a second look at it's use in #1074.
If performance is a roadblock, for irregular data on short intervals, resampling seems appropriate.
Thanks @mjprilliman, it'll be interesting to see how your implementation compares. It's good to have a reference implementation too. I don't think this function is tied to a deadline so of course whenever you can find the time for it is fine.
> I didn't realize that rolling.apply() was so slow (compared to built-in methods)
@cwhanse I forgot to mention this earlier but you can specify `raw=True` in `apply` to get an order of magnitude or two speedup. The difference is whether the applied function is passed Series objects (`raw=False`) or numpy arrays (`raw=True`). In this application I needed to keep it as a Series for the timedelta calculations so I used the slow route. Maybe the fast path could work in #1074, not sure.
>My natural starting point was to use pandas to get the offset-based windows and apply a custom averaging function. The problem is that `tseries.rolling(...).apply(...)` is way too slow
* It maybe b/c you are using apply. Have you tried: `tseries.rolling('1200s').mean()`? it may be faster
* A few times I have also found Pandas operations to be convenient but slow compared to NumPy, but AFAIK NumPy doesn't have `rolling()`, but I think I found this online somewhere:
```python
def moving_average(x, window=5):
"""
Moving average of numpy array
Parameters
----------
x : numeric
a numpy array to average
window : int
the window over which to average
Returns
-------
an array of the same size with index at beginning of window
If ``window <= 1`` then do nothing and return ``x``.
"""
if window <= 1: return x
m = window - 1
y = np.pad(x, (0, m), 'edge')
z = 0
for n in range(window):
z += np.roll(y, -n)
return z[:-m] / window
```
It's right bounded, so not sure how to backfill or center. I using [`numpy.roll`](https://numpy.org/doc/stable/reference/generated/numpy.roll.html) and [`numpy.pad`](https://numpy.org/doc/stable/reference/generated/numpy.pad.html)
> It maybe b/c you are using apply. Have you tried: tseries.rolling('1200s').mean()? it may be faster
The built-in functions are definitely way faster. The problem is that I didn't see a way to implement the exponential weighting using only the built-in functions, so I went with `apply` and a custom function. It's tricky not only because it's a weighted average of all points in the last 1200s but also because the weights vary for each window based on wind speed (and also the index if the series is irregularly-sampled).
> I think I found this online somewhere
The issue there, assuming we want to accommodate irregular sampling, is that the window size can vary based on how many timestamps fall into the last `1200s` -- it's an offset-based window, not a count-based window. I couldn't see an elegant/efficient way to recreate pandas's offset-based window with numpy, but would love to see one!
I recently became aware of [`np.nditer`](https://numpy.org/doc/stable/reference/generated/numpy.nditer.html), which sped up my rolling window apply greatly. I think you should be able to use it for this exponential weighted function, basically applied as a for loop with an efficiently stored/accessed array. I was doing a simple calc on 20 years of 525600 long series, and it was quite snappy, relative to the rolling window apply which was a long enough process that the full 20 year calc was at least tens of minutes if not an hour. Sorry I don't have more quantified values.
Here are a few prototype implementations. Note that the output is very slightly different from the function @mjprilliman posted above -- I've not yet chased down the difference but it's good enough for proof of concept anyway. tl;dr: the numpy-based implementations are quite fast but assume regular sampling. https://gist.github.com/kanderso-nrel/1d6da384d7af8afc24c230f1f144eb57
I still think it's a shame to not support irregular sampling, but the performance penalty of using pandas here is so huge that I think we should just require regular sampling. It's certainly much better than nothing, which is what we've had since this issue was opened (October 2020...).
One question for @mjprilliman: the handful of temperature near the start of the inputs don't have a full window of previous conditions to average across. The numpy implementations in my notebook linked above return NaN for those points, but your example implementation uses whatever values are available, even if they don't comprise a full 20-minute window. I think yours is more faithful to the reference's equations, but I wonder if mine is more consistent with its spirit? Which approach do you think makes sense here?
>
Hi Kevin,
Thanks for these examples. For your question, I would say the spirit of the model would be to include anything within the window even if there is not a full window to look back on. But taking model speed into account I think your v3 and v4 approaches make the most sense even without the partial window calculations. I would imagine in most use cases the first 20 minutes of data would occur in thermal equilibrium (night time) so a steady-state model would be sufficient. There could always be edge cases where some of the thermal inertia is not captured in the first time steps of a model but I think the speed up is worth more. Just my thoughts maybe @jsstein feels differently.
Nice work, Kevin. Based on a comment in the code, I wonder if this could help for the pandas version:
https://stackoverflow.com/questions/60736556/pandas-rolling-apply-using-multiple-columns/60918101#60918101
> the spirit of the model would be to include anything within the window even if there is not a full window to look back on.
I think the numpy functions could be modified to do this without a meaningful increase in runtime. I'll try that out before opening a PR. Thanks!
@adriesse I think that SO answer is essentially the same approach taken in the notebook: use the index of the moving subset to retrieve the corresponding subset of other columns. Maybe that notebook code could be made clearer, but I think the workaround to access multiple time series in a single rolling window is the same. Great minds think alike I guess ;)
> I think we should just require regular sampling.
I don't see that as a major problem. This function operates on the output of a temperature model (applies a form of exponential smoothing). It is likely that a user calculated that output with a regular time index. Even if not, the smoothing basically justifies interpolation on the input cell temperature and wind speed to a regular index, in the following sense: output from applying the function to interpolated input is likely to be very similar to output when applying the function to the non-regular input then interpolating the output.
--------------------
</issues> | 311781d2380997044da0e484dc90aa146a74ca44 |
sympy__sympy-22910 | 22,910 | sympy/sympy | 1.10 | 95a0b68b4b67d02102a274620da08c925cc401db | 2022-01-24T17:38:08Z | diff --git a/sympy/tensor/array/expressions/array_expressions.py b/sympy/tensor/array/expressions/array_expressions.py
index 75f534696d56..a9b9d1af16b6 100644
--- a/sympy/tensor/array/expressions/array_expressions.py
+++ b/sympy/tensor/array/expressions/array_expressions.py
@@ -7,7 +7,7 @@
import typing
-from sympy import Integer, KroneckerDelta
+from sympy import Integer, KroneckerDelta, Equality
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
@@ -16,6 +16,7 @@
from sympy.core.singleton import S
from sympy.core.sorting import default_sort_key
from sympy.core.symbol import (Dummy, Symbol)
+from sympy.matrices.common import MatrixCommon
from sympy.matrices.expressions.diagonal import diagonalize_vector
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.matrices.expressions.special import ZeroMatrix
@@ -1458,6 +1459,46 @@ def as_explicit(self):
return tensorcontraction(self.expr.as_explicit(), *self.contraction_indices)
+class Reshape(_CodegenArrayAbstract):
+
+ def __new__(cls, expr, shape):
+ expr = _sympify(expr)
+ if not isinstance(shape, Tuple):
+ shape = Tuple(*shape)
+ if Equality(Mul.fromiter(expr.shape), Mul.fromiter(shape)) == False:
+ raise ValueError("shape mismatch")
+ obj = Expr.__new__(cls, expr, shape)
+ obj._shape = tuple(shape)
+ obj._expr = expr
+ return obj
+
+ @property
+ def shape(self):
+ return self._shape
+
+ @property
+ def expr(self):
+ return self._expr
+
+ def doit(self, *args, **kwargs):
+ if kwargs.get("deep", True):
+ expr = self.expr.doit(*args, **kwargs)
+ else:
+ expr = self.expr
+ if isinstance(expr, (MatrixCommon, NDimArray)):
+ return expr.reshape(*self.shape)
+ return Reshape(expr, self.shape)
+
+ def as_explicit(self):
+ ee = self.expr.as_explicit()
+ if isinstance(ee, MatrixCommon):
+ from sympy import Array
+ ee = Array(ee)
+ elif isinstance(ee, MatrixExpr):
+ return self
+ return ee.reshape(*self.shape)
+
+
class _ArgE:
"""
The ``_ArgE`` object contains references to the array expression
diff --git a/sympy/tensor/array/expressions/arrayexpr_derivatives.py b/sympy/tensor/array/expressions/arrayexpr_derivatives.py
index 97fa4659f5d5..3be83b731342 100644
--- a/sympy/tensor/array/expressions/arrayexpr_derivatives.py
+++ b/sympy/tensor/array/expressions/arrayexpr_derivatives.py
@@ -11,10 +11,10 @@
from sympy.combinatorics.permutations import _af_invert
from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction
from sympy.tensor.array.expressions.array_expressions import (
- _ArrayExpr, ZeroArray, ArraySymbol, ArrayTensorProduct, ArrayAdd,
- PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, get_rank,
- get_shape, ArrayContraction, _array_tensor_product, _array_contraction,
- _array_diagonal, _array_add, _permute_dims)
+ _ArrayExpr, ZeroArray, ArraySymbol, ArrayTensorProduct, ArrayAdd,
+ PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, get_rank,
+ get_shape, ArrayContraction, _array_tensor_product, _array_contraction,
+ _array_diagonal, _array_add, _permute_dims, Reshape)
from sympy.tensor.array.expressions.conv_matrix_to_array import convert_matrix_to_array
@@ -173,6 +173,12 @@ def _(expr: PermuteDims, x: Expr):
return _permute_dims(de, perm)
+@array_derive.register(Reshape)
+def _(expr: Reshape, x: Expr):
+ de = array_derive(expr.expr, x)
+ return Reshape(de, get_shape(x) + expr.shape)
+
+
def matrix_derive(expr, x):
from sympy.tensor.array.expressions.conv_array_to_matrix import convert_array_to_matrix
ce = convert_matrix_to_array(expr)
diff --git a/sympy/tensor/array/expressions/conv_matrix_to_array.py b/sympy/tensor/array/expressions/conv_matrix_to_array.py
index 3b3284abfe18..8f66961727f6 100644
--- a/sympy/tensor/array/expressions/conv_matrix_to_array.py
+++ b/sympy/tensor/array/expressions/conv_matrix_to_array.py
@@ -1,3 +1,4 @@
+from sympy import KroneckerProduct
from sympy.core.basic import Basic
from sympy.core.function import Lambda
from sympy.core.mul import Mul
@@ -14,7 +15,7 @@
from sympy.matrices.expressions.matexpr import MatrixExpr
from sympy.tensor.array.expressions.array_expressions import \
ArrayElementwiseApplyFunc, _array_tensor_product, _array_contraction, \
- _array_diagonal, _array_add, _permute_dims
+ _array_diagonal, _array_add, _permute_dims, Reshape
def convert_matrix_to_array(expr: Basic) -> Basic:
@@ -78,5 +79,9 @@ def convert_matrix_to_array(expr: Basic) -> Basic:
else:
d = Dummy("d")
return ArrayElementwiseApplyFunc(Lambda(d, d**exp), base)
+ elif isinstance(expr, KroneckerProduct):
+ kp_args = [convert_matrix_to_array(arg) for arg in expr.args]
+ permutation = [2*i for i in range(len(kp_args))] + [2*i + 1 for i in range(len(kp_args))]
+ return Reshape(_permute_dims(_array_tensor_product(*kp_args), permutation), expr.shape)
else:
return expr
| diff --git a/sympy/core/tests/test_args.py b/sympy/core/tests/test_args.py
index cb7d1c8df613..25d432336627 100644
--- a/sympy/core/tests/test_args.py
+++ b/sympy/core/tests/test_args.py
@@ -4970,6 +4970,12 @@ def test_sympy__tensor__array__expressions__array_expressions__ArrayElementwiseA
assert _test_args(ArrayElementwiseApplyFunc(exp, A))
+def test_sympy__tensor__array__expressions__array_expressions__Reshape():
+ from sympy.tensor.array.expressions.array_expressions import ArraySymbol, Reshape
+ A = ArraySymbol("A", (4,))
+ assert _test_args(Reshape(A, (2, 2)))
+
+
def test_sympy__codegen__ast__Assignment():
from sympy.codegen.ast import Assignment
assert _test_args(Assignment(x, y))
diff --git a/sympy/tensor/array/expressions/tests/test_array_expressions.py b/sympy/tensor/array/expressions/tests/test_array_expressions.py
index a84be3381cdd..245fb7a4fe40 100644
--- a/sympy/tensor/array/expressions/tests/test_array_expressions.py
+++ b/sympy/tensor/array/expressions/tests/test_array_expressions.py
@@ -1,6 +1,6 @@
import random
-from sympy import tensordiagonal, eye, KroneckerDelta
+from sympy import tensordiagonal, eye, KroneckerDelta, Array
from sympy.core.symbol import symbols
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.matrices.expressions.diagonal import DiagMatrix
@@ -12,7 +12,7 @@
from sympy.tensor.array.expressions.array_expressions import ZeroArray, OneArray, ArraySymbol, ArrayElement, \
PermuteDims, ArrayContraction, ArrayTensorProduct, ArrayDiagonal, \
ArrayAdd, nest_permutation, ArrayElementwiseApplyFunc, _EditArrayContraction, _ArgE, _array_tensor_product, \
- _array_contraction, _array_diagonal, _array_add, _permute_dims
+ _array_contraction, _array_diagonal, _array_add, _permute_dims, Reshape
from sympy.testing.pytest import raises
i, j, k, l, m, n = symbols("i j k l m n")
@@ -675,6 +675,12 @@ def test_array_expressions_no_canonicalization():
assert expr.doit() == PermuteDims(M, [1, 0])
assert expr._canonicalize() == expr.doit()
+ # Reshape
+
+ expr = Reshape(A, (k**2,))
+ assert expr.shape == (k**2,)
+ assert isinstance(expr, Reshape)
+
def test_array_expr_construction_with_functions():
@@ -728,3 +734,34 @@ def test_array_element_expressions():
assert K4[i, j, k, l].diff(K4[1, 2, 3, 4]) == (
KroneckerDelta(i, 1)*KroneckerDelta(j, 2)*KroneckerDelta(k, 3)*KroneckerDelta(l, 4)
)
+
+
+def test_array_expr_reshape():
+
+ A = MatrixSymbol("A", 2, 2)
+ B = ArraySymbol("B", (2, 2, 2))
+ C = Array([1, 2, 3, 4])
+
+ expr = Reshape(A, (4,))
+ assert expr.expr == A
+ assert expr.shape == (4,)
+ assert expr.as_explicit() == Array([A[0, 0], A[0, 1], A[1, 0], A[1, 1]])
+
+ expr = Reshape(B, (2, 4))
+ assert expr.expr == B
+ assert expr.shape == (2, 4)
+ ee = expr.as_explicit()
+ assert isinstance(ee, ImmutableDenseNDimArray)
+ assert ee.shape == (2, 4)
+ assert ee == Array([[B[0, 0, 0], B[0, 0, 1], B[0, 1, 0], B[0, 1, 1]], [B[1, 0, 0], B[1, 0, 1], B[1, 1, 0], B[1, 1, 1]]])
+
+ expr = Reshape(A, (k, 2))
+ assert expr.shape == (k, 2)
+
+ raises(ValueError, lambda: Reshape(A, (2, 3)))
+ raises(ValueError, lambda: Reshape(A, (3,)))
+
+ expr = Reshape(C, (2, 2))
+ assert expr.expr == C
+ assert expr.shape == (2, 2)
+ assert expr.doit() == Array([[1, 2], [3, 4]])
diff --git a/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py b/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py
index df153ff4a865..bc0fcf63f260 100644
--- a/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py
+++ b/sympy/tensor/array/expressions/tests/test_arrayexpr_derivatives.py
@@ -4,7 +4,7 @@
from sympy.matrices.expressions.special import Identity
from sympy.matrices.expressions.applyfunc import ElementwiseApplyFunction
from sympy.tensor.array.expressions.array_expressions import ArraySymbol, ArrayTensorProduct, \
- PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, ArrayContraction, _permute_dims
+ PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, ArrayContraction, _permute_dims, Reshape
from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive
k = symbols("k")
@@ -72,3 +72,7 @@ def test_arrayexpr_derivatives1():
ArrayElementwiseApplyFunc(cos, A1)
), (1, 6), (3, 7), (5, 8)
))
+
+ cg = Reshape(A, (k**2,))
+ res = array_derive(cg, A)
+ assert res == Reshape(PermuteDims(ArrayTensorProduct(I, I), [0, 2, 1, 3]), (k, k, k**2))
diff --git a/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py b/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py
index e804fad43b65..3968958a08b8 100644
--- a/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py
+++ b/sympy/tensor/array/expressions/tests/test_convert_matrix_to_array.py
@@ -1,4 +1,4 @@
-from sympy import Lambda
+from sympy import Lambda, KroneckerProduct
from sympy.core.symbol import symbols, Dummy
from sympy.matrices.expressions.hadamard import (HadamardPower, HadamardProduct)
from sympy.matrices.expressions.inverse import Inverse
@@ -8,7 +8,7 @@
from sympy.matrices.expressions.trace import Trace
from sympy.matrices.expressions.transpose import Transpose
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct, ArrayContraction, \
- PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, _array_contraction, _array_tensor_product
+ PermuteDims, ArrayDiagonal, ArrayElementwiseApplyFunc, _array_contraction, _array_tensor_product, Reshape
from sympy.tensor.array.expressions.conv_array_to_matrix import convert_array_to_matrix
from sympy.tensor.array.expressions.conv_matrix_to_array import convert_matrix_to_array
@@ -118,3 +118,11 @@ def test_arrayexpr_convert_matrix_to_array():
expr = a.T*b
cg = convert_matrix_to_array(expr)
assert cg == ArrayContraction(ArrayTensorProduct(a, b), (0, 2))
+
+ expr = KroneckerProduct(A, B)
+ cg = convert_matrix_to_array(expr)
+ assert cg == Reshape(PermuteDims(ArrayTensorProduct(A, B), [0, 2, 1, 3]), (k**2, k**2))
+
+ expr = KroneckerProduct(A, B, C, D)
+ cg = convert_matrix_to_array(expr)
+ assert cg == Reshape(PermuteDims(ArrayTensorProduct(A, B, C, D), [0, 2, 4, 6, 1, 3, 5, 7]), (k**4, k**4))
| [
{
"components": [
{
"doc": "",
"lines": [
1462,
1499
],
"name": "Reshape",
"signature": "class Reshape(_CodegenArrayAbstract):",
"type": "class"
},
{
"doc": "",
"lines": [
1464,
1473
... | [
"test_sympy__tensor__array__expressions__array_expressions__Reshape",
"test_array_symbol_and_element",
"test_zero_array",
"test_one_array",
"test_arrayexpr_contraction_construction",
"test_arrayexpr_array_flatten",
"test_arrayexpr_array_diagonal",
"test_arrayexpr_array_shape",
"test_arrayexpr_permut... | [
"test_all_classes_are_tested",
"test_sympy__algebras__quaternion__Quaternion",
"test_sympy__assumptions__assume__AppliedPredicate",
"test_predicates",
"test_sympy__assumptions__assume__UndefinedPredicate",
"test_sympy__assumptions__relation__binrel__AppliedBinaryRelation",
"test_sympy__assumptions__wrap... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added Reshape operator for array expressions
Added Reshape operator for array expressions
<!-- BEGIN RELEASE NOTES -->
NO ENTRY
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/tensor/array/expressions/array_expressions.py]
(definition of Reshape:)
class Reshape(_CodegenArrayAbstract):
(definition of Reshape.__new__:)
def __new__(cls, expr, shape):
(definition of Reshape.shape:)
def shape(self):
(definition of Reshape.expr:)
def expr(self):
(definition of Reshape.doit:)
def doit(self, *args, **kwargs):
(definition of Reshape.as_explicit:)
def as_explicit(self):
[end of new definitions in sympy/tensor/array/expressions/array_expressions.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3e8695add7a25c8d70aeba7d6137496df02863fd | ||
sqlfluff__sqlfluff-2369 | 2,369 | sqlfluff/sqlfluff | 0.8 | d974cdaa1c49090fa1460c10d2b9d9ae69cd5fb8 | 2022-01-20T13:28:17Z | diff --git a/src/sqlfluff/core/rules/functional/segments.py b/src/sqlfluff/core/rules/functional/segments.py
index 0a68541bd13..6f19f3ff8ed 100644
--- a/src/sqlfluff/core/rules/functional/segments.py
+++ b/src/sqlfluff/core/rules/functional/segments.py
@@ -81,6 +81,14 @@ def raw_segments(self) -> "Segments":
raw_segments_list.extend(s.raw_segments)
return Segments(*raw_segments_list, templated_file=self.templated_file)
+ def recursive_crawl(self, *seg_type: str, recurse_into: bool = True) -> "Segments":
+ """Recursively crawl for segments of a given type."""
+ segments: List[BaseSegment] = []
+ for s in self:
+ for i in s.recursive_crawl(*seg_type, recurse_into):
+ segments.append(i)
+ return Segments(*segments, templated_file=self.templated_file)
+
def children(
self, predicate: Optional[Callable[[BaseSegment], bool]] = None
) -> "Segments":
| diff --git a/test/core/rules/functional/test_segments.py b/test/core/rules/functional/test_segments.py
index 1644e015fa9..ab7adf4cf20 100644
--- a/test/core/rules/functional/test_segments.py
+++ b/test/core/rules/functional/test_segments.py
@@ -1,6 +1,7 @@
"""Tests for the segments module."""
import pytest
+from sqlfluff.core.linter.linter import Linter
from sqlfluff.core.parser.segments.raw import RawSegment
from sqlfluff.core.rules.functional import segments
import sqlfluff.core.rules.functional.segment_predicates as sp
@@ -138,3 +139,24 @@ def test_segment_predicates_and():
)
== segments.Segments()
)
+
+
+def test_segments_recursive_crawl():
+ """Test the "recursive_crawl()" function."""
+ sql = """
+ WITH cte AS (
+ SELECT * FROM tab_a
+ )
+ SELECT
+ cte.col_a,
+ tab_b.col_b
+ FROM cte
+ INNER JOIN tab_b;
+ """
+ linter = Linter()
+ parsed = linter.parse_string(sql)
+
+ functional_tree = segments.Segments(parsed.tree)
+
+ assert len(functional_tree.recursive_crawl("common_table_expression")) == 1
+ assert len(functional_tree.recursive_crawl("table_reference")) == 3
| [
{
"components": [
{
"doc": "Recursively crawl for segments of a given type.",
"lines": [
84,
90
],
"name": "Segments.recursive_crawl",
"signature": "def recursive_crawl(self, *seg_type: str, recurse_into: bool = True) -> \"Segments\":",
"... | [
"test/core/rules/functional/test_segments.py::test_segments_recursive_crawl"
] | [
"test/core/rules/functional/test_segments.py::test_segments_add[lhs0-rhs0-expected0]",
"test/core/rules/functional/test_segments.py::test_segments_add[lhs1-rhs1-expected1]",
"test/core/rules/functional/test_segments.py::test_segments_add[lhs2-rhs2-expected2]",
"test/core/rules/functional/test_segments.py::tes... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Functional API: Segments.recursive_crawl
<!--Firstly, thanks for adding this feature! Secondly, please check the key steps against the checklist below to make your contribution easy to merge.-->
<!--Please give the Pull Request a meaningful title (including the dialect this PR is for if it is dialect specific), as this will automatically be added to the release notes, and then the Change Log.-->
### Brief summary of the change made
<!--If there is an open issue for this, then please include `fixes #XXXX` or `closes #XXXX` replacing `XXXX` with the issue number and it will automatically close the issue when the pull request is merged. Alternatively if not fully closed you can say `makes progress on #XXXX` to create a link on that issue without closing it.-->
As discussed [here](https://github.com/sqlfluff/sqlfluff/pull/2364#discussion_r788731722) we should add recursive_crawl to the functional API.
### Are there any other side effects of this change that we should be aware of?
No
### Pull Request checklist
- [X] Please confirm you have completed any of the necessary steps below.
- Included test cases to demonstrate any code changes, which may be one or more of the following:
- `.yml` rule test cases in `test/fixtures/rules/std_rule_cases`.
- `.sql`/`.yml` parser test cases in `test/fixtures/dialects` (note YML files can be auto generated with `tox -e generate-fixture-yml`).
- Full autofix test cases in `test/fixtures/linter/autofix`.
- Other.
- Added appropriate documentation for the change.
- Created GitHub issues for any relevant followup/future enhancements if appropriate.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/sqlfluff/core/rules/functional/segments.py]
(definition of Segments.recursive_crawl:)
def recursive_crawl(self, *seg_type: str, recurse_into: bool = True) -> "Segments":
"""Recursively crawl for segments of a given type."""
[end of new definitions in src/sqlfluff/core/rules/functional/segments.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | a5c4eae4e3e419fe95460c9afd9cf39a35a470c4 | ||
conan-io__conan-10380 | 10,380 | conan-io/conan | null | 5c05cd52fb8ab2ee6f43f9cde40b0c5b17595ac0 | 2022-01-19T18:15:11Z | diff --git a/conan/tools/system/__init__.py b/conan/tools/system/__init__.py
new file mode 100644
index 00000000000..ff8204ea6cd
--- /dev/null
+++ b/conan/tools/system/__init__.py
@@ -0,0 +1,1 @@
+from conan.tools.system.package_manager import Apt, Yum, Dnf, Brew, Pkg, PkgUtil, Chocolatey, PacMan, Zypper
diff --git a/conan/tools/system/package_manager.py b/conan/tools/system/package_manager.py
new file mode 100644
index 00000000000..2fee7296bda
--- /dev/null
+++ b/conan/tools/system/package_manager.py
@@ -0,0 +1,218 @@
+import platform
+
+from conans.client.graph.graph import CONTEXT_BUILD
+from conans.errors import ConanException
+
+
+class _SystemPackageManagerTool(object):
+ mode_check = "check"
+ mode_install = "install"
+ tool_name = None
+ install_command = ""
+ update_command = ""
+ check_command = ""
+
+ def __init__(self, conanfile):
+ self._conanfile = conanfile
+ self._active_tool = self._conanfile.conf["tools.system.package_manager:tool"] or self.get_default_tool()
+ self._sudo = self._conanfile.conf["tools.system.package_manager:sudo"]
+ self._sudo_askpass = self._conanfile.conf["tools.system.package_manager:sudo_askpass"]
+ self._mode = self._conanfile.conf["tools.system.package_manager:mode"] or self.mode_check
+ self._arch = self._conanfile.settings_build.get_safe('arch') \
+ if self._conanfile.context == CONTEXT_BUILD else self._conanfile.settings.get_safe('arch')
+ self._arch_names = {}
+ self._arch_separator = ""
+
+ def get_default_tool(self):
+ os_name = platform.system()
+ if os_name in ["Linux", "FreeBSD"]:
+ import distro
+ os_name = distro.id() or os_name
+ elif os_name == "Windows" and self._conanfile.conf["tools.microsoft.bash:subsystem"] == "msys2":
+ os_name = "msys2"
+ manager_mapping = {"apt-get": ["Linux", "ubuntu", "debian"],
+ "yum": ["pidora", "scientific", "xenserver", "amazon", "oracle", "amzn",
+ "almalinux"],
+ "dnf": ["fedora", "rhel", "centos", "mageia"],
+ "brew": ["Darwin"],
+ "pacman": ["arch", "manjaro", "msys2"],
+ "choco": ["Windows"],
+ "zypper": ["opensuse", "sles"],
+ "pkg": ["freebsd"],
+ "pkgutil": ["Solaris"]}
+ for tool, distros in manager_mapping.items():
+ if os_name in distros:
+ return tool
+
+ def get_package_name(self, package):
+ # TODO: should we only add the arch if cross-building?
+ if self._arch in self._arch_names:
+ return "{}{}{}".format(package, self._arch_separator,
+ self._arch_names.get(self._arch))
+ return package
+
+ @property
+ def sudo_str(self):
+ sudo = "sudo " if self._sudo else ""
+ askpass = "-A " if self._sudo and self._sudo_askpass else ""
+ return "{}{}".format(sudo, askpass)
+
+ def run(self, method, *args, **kwargs):
+ if self._active_tool == self.__class__.tool_name:
+ return method(*args, **kwargs)
+
+ def install(self, *args, **kwargs):
+ return self.run(self._install, *args, **kwargs)
+
+ def update(self, *args, **kwargs):
+ return self.run(self._update, *args, **kwargs)
+
+ def check(self, *args, **kwargs):
+ return self.run(self._check, *args, **kwargs)
+
+ def _install(self, packages, update=False, check=True, **kwargs):
+ if update:
+ self.update()
+
+ if check:
+ packages = self.check(packages)
+
+ if self._mode == self.mode_check and packages:
+ raise ConanException("System requirements: '{0}' are missing but can't install "
+ "because tools.system.package_manager:mode is '{1}'."
+ "Please update packages manually or set "
+ "'tools.system.package_manager:mode' "
+ "to '{2}' in the [conf] section of the profile, "
+ "or in the command line using "
+ "'-c tools.system.package_manager:mode={2}'".format(", ".join(packages),
+ self.mode_check,
+ self.mode_install))
+ elif packages:
+ packages_arch = [self.get_package_name(package) for package in packages]
+ if packages_arch:
+ command = self.install_command.format(sudo=self.sudo_str,
+ tool=self.tool_name,
+ packages=" ".join(packages_arch),
+ **kwargs)
+ return self._conanfile.run(command)
+ else:
+ self._conanfile.output.info("System requirements: {} already "
+ "installed".format(" ".join(packages)))
+
+ def _update(self):
+ if self._mode == self.mode_check:
+ raise ConanException("Can't update because tools.system.package_manager:mode is '{0}'."
+ "Please update packages manually or set "
+ "'tools.system.package_manager:mode' "
+ "to '{1}' in the [conf] section of the profile, "
+ "or in the command line using "
+ "'-c tools.system.package_manager:mode={1}'".format(self.mode_check,
+ self.mode_install))
+ command = self.update_command.format(sudo=self.sudo_str, tool=self.tool_name)
+ return self._conanfile.run(command)
+
+ def _check(self, packages):
+ missing = [pkg for pkg in packages if self.check_package(self.get_package_name(pkg)) != 0]
+ return missing
+
+ def check_package(self, package):
+ command = self.check_command.format(tool=self.tool_name,
+ package=package)
+ return self._conanfile.run(command, ignore_errors=True)
+
+
+class Apt(_SystemPackageManagerTool):
+ # TODO: apt? apt-get?
+ tool_name = "apt-get"
+ install_command = "{sudo}{tool} install -y {recommends}{packages}"
+ update_command = "{sudo}{tool} update"
+ check_command = "dpkg-query -W -f='${{Status}}' {package} | grep -q \"ok installed\""
+
+ def __init__(self, conanfile, arch_names=None):
+ super(Apt, self).__init__(conanfile)
+ self._arch_names = {"x86_64": "amd64",
+ "x86": "i386",
+ "ppc32": "powerpc",
+ "ppc64le": "ppc64el",
+ "armv7": "arm",
+ "armv7hf": "armhf",
+ "armv8": "arm64",
+ "s390x": "s390x"} if arch_names is None else arch_names
+
+ self._arch_separator = ":"
+
+ def install(self, packages, update=False, check=False, recommends=False):
+ recommends_str = '' if recommends else '--no-install-recommends '
+ return super(Apt, self).install(packages, update=update, check=check,
+ recommends=recommends_str)
+
+
+class Yum(_SystemPackageManagerTool):
+ tool_name = "yum"
+ install_command = "{sudo}{tool} install -y {packages}"
+ update_command = "{sudo}{tool} check-update -y"
+ check_command = "rpm -q {package}"
+
+ def __init__(self, conanfile, arch_names=None):
+ super(Yum, self).__init__(conanfile)
+ self._arch_names = {"x86_64": "x86_64",
+ "x86": "i?86",
+ "ppc32": "powerpc",
+ "ppc64le": "ppc64le",
+ "armv7": "armv7",
+ "armv7hf": "armv7hl",
+ "armv8": "aarch64",
+ "s390x": "s390x"} if arch_names is None else arch_names
+ self._arch_separator = "."
+
+
+class Dnf(Yum):
+ tool_name = "dnf"
+
+
+class Brew(_SystemPackageManagerTool):
+ tool_name = "brew"
+ install_command = "{sudo}{tool} install {packages}"
+ update_command = "{sudo}{tool} update"
+ check_command = 'test -n "$({tool} ls --versions {package})"'
+
+
+class Pkg(_SystemPackageManagerTool):
+ tool_name = "pkg"
+ install_command = "{sudo}{tool} install -y {packages}"
+ update_command = "{sudo}{tool} update"
+ check_command = "{tool} info {package}"
+
+
+class PkgUtil(_SystemPackageManagerTool):
+ tool_name = "pkgutil"
+ install_command = "{sudo}{tool} --install --yes {packages}"
+ update_command = "{sudo}{tool} --catalog"
+ check_command = 'test -n "`{tool} --list {package}`"'
+
+
+class Chocolatey(_SystemPackageManagerTool):
+ tool_name = "choco"
+ install_command = "{tool} --install --yes {packages}"
+ update_command = "{tool} outdated"
+ check_command = '{tool} search --local-only --exact {package} | ' \
+ 'findstr /c:"1 packages installed."'
+
+
+class PacMan(_SystemPackageManagerTool):
+ tool_name = "pacman"
+ install_command = "{sudo}{tool} -S --noconfirm {packages}"
+ update_command = "{sudo}{tool} -Syyu --noconfirm"
+ check_command = "{tool} -Qi {package}"
+
+ def __init__(self, conanfile, arch_names=None):
+ super(PacMan, self).__init__(conanfile)
+ self._arch_names = {"x86": "lib32"} if arch_names is None else arch_names
+ self._arch_separator = "-"
+
+
+class Zypper(_SystemPackageManagerTool):
+ tool_name = "zypper"
+ install_command = "{sudo}{tool} --non-interactive in {packages}"
+ update_command = "{sudo}{tool} --non-interactive ref"
+ check_command = "rpm -q {package}"
diff --git a/conans/requirements.txt b/conans/requirements.txt
index 2b43d530f41..3ee8a2a5719 100644
--- a/conans/requirements.txt
+++ b/conans/requirements.txt
@@ -7,7 +7,7 @@ patch-ng>=1.17.4, <1.18
fasteners>=0.14.1
six>=1.10.0,<=1.16.0
node-semver==0.6.1
-distro>=1.0.2, <=1.6.0
+distro>=1.0.2, <=1.6.0; sys_platform == 'linux' or sys_platform == 'linux2'
pygments>=2.0, <3.0
tqdm>=4.28.1, <5
Jinja2>=2.9, <4.0.0
| diff --git a/conans/test/conftest.py b/conans/test/conftest.py
index c05ad3e1a1a..c7f06df173b 100644
--- a/conans/test/conftest.py
+++ b/conans/test/conftest.py
@@ -110,6 +110,8 @@
"Darwin": '/Users/jenkins/bin'}},
},
'premake': {},
+ 'apt_get': { "exe": "apt-get"},
+ 'brew': {},
# TODO: Intel oneAPI is not installed in CI yet. Uncomment this line whenever it's done.
# "intel_oneapi": {
# "default": "2021.3",
diff --git a/conans/test/functional/tools/system/__init__.py b/conans/test/functional/tools/system/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/conans/test/functional/tools/system/package_manager_test.py b/conans/test/functional/tools/system/package_manager_test.py
new file mode 100644
index 00000000000..08ed908b6f8
--- /dev/null
+++ b/conans/test/functional/tools/system/package_manager_test.py
@@ -0,0 +1,112 @@
+import platform
+import textwrap
+
+import pytest
+import six
+
+from conans.test.utils.tools import TestClient
+
+
+@pytest.mark.tool_apt_get
+@pytest.mark.skipif(platform.system() != "Linux", reason="Requires apt")
+@pytest.mark.skipif(six.PY2, reason="Does not pass on Py2 with Pytest")
+def test_apt_check():
+ client = TestClient()
+ client.save({"conanfile.py": textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.system import Apt
+ class MyPkg(ConanFile):
+ settings = "arch", "os"
+ def system_requirements(self):
+ apt = Apt(self)
+ not_installed = apt.check(["non-existing1", "non-existing2"])
+ print("missing:", not_installed)
+ """)})
+ client.run("create . test/1.0@ -s:b arch=armv8 -s:h arch=x86")
+ assert "dpkg-query: no packages found matching non-existing1:i386" in client.out
+ assert "dpkg-query: no packages found matching non-existing2:i386" in client.out
+ assert "missing: ['non-existing1', 'non-existing2']" in client.out
+
+
+@pytest.mark.tool_apt_get
+@pytest.mark.skipif(platform.system() != "Linux", reason="Requires apt")
+@pytest.mark.skipif(six.PY2, reason="Does not pass on Py2 with Pytest")
+def test_build_require():
+ client = TestClient()
+ client.save({"tool_require.py": textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.system import Apt
+ class MyPkg(ConanFile):
+ settings = "arch", "os"
+ def system_requirements(self):
+ apt = Apt(self)
+ not_installed = apt.check(["non-existing1", "non-existing2"])
+ print("missing:", not_installed)
+ """)})
+ client.run("export tool_require.py tool_require/1.0@")
+ client.save({"consumer.py": textwrap.dedent("""
+ from conans import ConanFile
+ class consumer(ConanFile):
+ settings = "arch", "os"
+ tool_requires = "tool_require/1.0"
+ """)})
+ client.run("create consumer.py consumer/1.0@ -s:b arch=armv8 -s:h arch=x86 --build=missing")
+ assert "dpkg-query: no packages found matching non-existing1:arm64" in client.out
+ assert "dpkg-query: no packages found matching non-existing2:arm64" in client.out
+ assert "missing: ['non-existing1', 'non-existing2']" in client.out
+
+
+@pytest.mark.tool_brew
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Requires brew")
+@pytest.mark.skipif(six.PY2, reason="Does not pass on Py2 with Pytest")
+def test_brew_check():
+ client = TestClient()
+ client.save({"conanfile.py": textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.system import Brew
+ class MyPkg(ConanFile):
+ settings = "arch"
+ def system_requirements(self):
+ brew = Brew(self)
+ not_installed = brew.check(["non-existing1", "non-existing2"])
+ print("missing:", not_installed)
+ """)})
+ client.run("create . test/1.0@")
+ assert "missing: ['non-existing1', 'non-existing2']" in client.out
+
+
+@pytest.mark.tool_brew
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Requires brew")
+@pytest.mark.skip(reason="brew update takes a lot of time")
+def test_brew_install_check_mode():
+ client = TestClient()
+ client.save({"conanfile.py": textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.system import Brew
+ class MyPkg(ConanFile):
+ settings = "arch"
+ def system_requirements(self):
+ brew = Brew(self)
+ brew.install(["non-existing1", "non-existing2"])
+ """)})
+ client.run("create . test/1.0@", assert_error=True)
+ assert "System requirements: 'non-existing1, non-existing2' are missing but " \
+ "can't install because tools.system.package_manager:mode is 'check'" in client.out
+
+
+@pytest.mark.tool_brew
+@pytest.mark.skipif(platform.system() != "Darwin", reason="Requires brew")
+@pytest.mark.skip(reason="brew update takes a lot of time")
+def test_brew_install_install_mode():
+ client = TestClient()
+ client.save({"conanfile.py": textwrap.dedent("""
+ from conans import ConanFile
+ from conan.tools.system import Brew
+ class MyPkg(ConanFile):
+ settings = "arch"
+ def system_requirements(self):
+ brew = Brew(self)
+ brew.install(["non-existing1", "non-existing2"])
+ """)})
+ client.run("create . test/1.0@ -c tools.system.package_manager:mode=install", assert_error=True)
+ assert "Error: No formulae found in taps." in client.out
diff --git a/conans/test/integration/tools/system/__init__.py b/conans/test/integration/tools/system/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/conans/test/integration/tools/system/package_manager_test.py b/conans/test/integration/tools/system/package_manager_test.py
new file mode 100644
index 00000000000..b26b841128e
--- /dev/null
+++ b/conans/test/integration/tools/system/package_manager_test.py
@@ -0,0 +1,211 @@
+import platform
+
+import mock
+import pytest
+from mock.mock import PropertyMock
+
+from conan.tools.system.package_manager import Apt, Dnf, Yum, Brew, Pkg, PkgUtil, Chocolatey, Zypper, \
+ PacMan, _SystemPackageManagerTool
+from conans import Settings
+from conans.errors import ConanException
+from conans.model.conf import Conf
+from conans.test.utils.mocks import ConanFileMock
+
+
+@pytest.mark.parametrize("platform, tool", [
+ ("Linux", "apt-get"),
+ ("Windows", "choco"),
+ ("Darwin", "brew"),
+ ("Solaris", "pkgutil"),
+])
+@pytest.mark.skipif(platform.system() != "Linux", reason="Only linux")
+def test_package_manager_platform(platform, tool):
+ with mock.patch("platform.system", return_value=platform):
+ with mock.patch("distro.id", return_value=''):
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ conanfile = ConanFileMock()
+ conanfile.settings = Settings()
+ manager = _SystemPackageManagerTool(conanfile)
+ assert tool == manager.get_default_tool()
+
+
+@pytest.mark.skipif(platform.system() != "Windows", reason="Only Windows")
+def test_msys2():
+ with mock.patch("platform.system", return_value="Windows"):
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.microsoft.bash:subsystem"] = "msys2"
+ manager = _SystemPackageManagerTool(conanfile)
+ assert manager.get_default_tool() == "pacman"
+
+
+@pytest.mark.parametrize("distro, tool", [
+ ("ubuntu", "apt-get"),
+ ("debian", "apt-get"),
+ ("pidora", "yum"),
+ ("fedora", "dnf"),
+ ("arch", "pacman"),
+ ("opensuse", "zypper"),
+ ("freebsd", "pkg"),
+])
+@pytest.mark.skipif(platform.system() != "Linux", reason="Only linux")
+def test_package_manager_distro(distro, tool):
+ with mock.patch("platform.system", return_value="Linux"):
+ with mock.patch("distro.id", return_value=distro):
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ conanfile = ConanFileMock()
+ conanfile.settings = Settings()
+ manager = _SystemPackageManagerTool(conanfile)
+ assert tool == manager.get_default_tool()
+
+
+@pytest.mark.parametrize("sudo, sudo_askpass, expected_str", [
+ (True, True, "sudo -A "),
+ (True, False, "sudo "),
+ (False, True, ""),
+ (False, False, ""),
+])
+def test_sudo_str(sudo, sudo_askpass, expected_str):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:sudo"] = sudo
+ conanfile.conf["tools.system.package_manager:sudo_askpass"] = sudo_askpass
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ apt = Apt(conanfile)
+ assert apt.sudo_str == expected_str
+
+
+@pytest.mark.parametrize("recommends, recommends_str", [
+ (False, "--no-install-recommends "),
+ (True, ""),
+])
+def test_apt_install_recommends(recommends, recommends_str):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:tool"] = "apt-get"
+ conanfile.conf["tools.system.package_manager:mode"] = "install"
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ apt = Apt(conanfile)
+ apt.install(["package1", "package2"], recommends=recommends)
+ assert apt._conanfile.command == "apt-get install -y {}package1 package2".format(recommends_str)
+
+
+@pytest.mark.parametrize("tool_class",
+ [Apt, Yum, Dnf, Brew, Pkg, PkgUtil, Chocolatey, PacMan, Zypper])
+def test_tools_install_mode_check(tool_class):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:tool"] = tool_class.tool_name
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ tool = tool_class(conanfile)
+ with pytest.raises(ConanException) as exc_info:
+ tool.install(["package1", "package2"])
+ assert exc_info.value.args[0] == "System requirements: 'package1, package2' are missing but " \
+ "can't install because tools.system.package_manager:mode is " \
+ "'check'.Please update packages manually or set " \
+ "'tools.system.package_manager:mode' to 'install' in the [conf] " \
+ "section of the profile, or in the command line using " \
+ "'-c tools.system.package_manager:mode=install'"
+
+
+@pytest.mark.parametrize("tool_class",
+ [Apt, Yum, Dnf, Brew, Pkg, PkgUtil, Chocolatey, PacMan, Zypper])
+def test_tools_update_mode_check(tool_class):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:tool"] = tool_class.tool_name
+ conanfile.conf["tools.system.package_manager:mode"] = "check"
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ tool = tool_class(conanfile)
+ with pytest.raises(ConanException) as exc_info:
+ tool.update()
+ assert exc_info.value.args[0] == "Can't update because tools.system.package_manager:mode is " \
+ "'check'.Please update packages manually or set " \
+ "'tools.system.package_manager:mode' to 'install' in the [conf] " \
+ "section of the profile, or in the command line using " \
+ "'-c tools.system.package_manager:mode=install'"
+
+
+
+@pytest.mark.parametrize("tool_class, result", [
+ (Apt, "apt-get update"),
+ (Yum, "yum check-update -y"),
+ (Dnf, "dnf check-update -y"),
+ (Brew, "brew update"),
+ (Pkg, "pkg update"),
+ (PkgUtil, "pkgutil --catalog"),
+ (Chocolatey, "choco outdated"),
+ (PacMan, "pacman -Syyu --noconfirm"),
+ (Zypper, "zypper --non-interactive ref"),
+])
+def test_tools_update_mode_install(tool_class, result):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:tool"] = tool_class.tool_name
+ conanfile.conf["tools.system.package_manager:mode"] = "install"
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ tool = tool_class(conanfile)
+ tool.update()
+ assert tool._conanfile.command == result
+
+
+@pytest.mark.parametrize("tool_class, result", [
+ (Apt, 'apt-get install -y --no-install-recommends package1 package2'),
+ (Yum, 'yum install -y package1 package2'),
+ (Dnf, 'dnf install -y package1 package2'),
+ (Brew, 'brew install package1 package2'),
+ (Pkg, 'pkg install -y package1 package2'),
+ (PkgUtil, 'pkgutil --install --yes package1 package2'),
+ (Chocolatey, 'choco --install --yes package1 package2'),
+ (PacMan, 'pacman -S --noconfirm package1 package2'),
+ (Zypper, 'zypper --non-interactive in package1 package2'),
+])
+def test_tools_install_mode_install(tool_class, result):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:tool"] = tool_class.tool_name
+ conanfile.conf["tools.system.package_manager:mode"] = "install"
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ tool = tool_class(conanfile)
+ tool.install(["package1", "package2"])
+ assert tool._conanfile.command == result
+
+
+@pytest.mark.parametrize("tool_class, result", [
+ (Apt, 'dpkg-query -W -f=\'${Status}\' package | grep -q "ok installed"'),
+ (Yum, 'rpm -q package'),
+ (Dnf, 'rpm -q package'),
+ (Brew, 'test -n "$(brew ls --versions package)"'),
+ (Pkg, 'pkg info package'),
+ (PkgUtil, 'test -n "`pkgutil --list package`"'),
+ (Chocolatey, 'choco search --local-only --exact package | findstr /c:"1 packages installed."'),
+ (PacMan, 'pacman -Qi package'),
+ (Zypper, 'rpm -q package'),
+])
+def test_tools_check(tool_class, result):
+ conanfile = ConanFileMock()
+ conanfile.conf = Conf()
+ conanfile.settings = Settings()
+ conanfile.conf["tools.system.package_manager:tool"] = tool_class.tool_name
+ with mock.patch('conans.ConanFile.context', new_callable=PropertyMock) as context_mock:
+ context_mock.return_value = "host"
+ tool = tool_class(conanfile)
+ tool.check(["package"])
+ assert tool._conanfile.command == result
diff --git a/conans/test/utils/mocks.py b/conans/test/utils/mocks.py
index d4beeb46bcd..250f520d5d1 100644
--- a/conans/test/utils/mocks.py
+++ b/conans/test/utils/mocks.py
@@ -182,7 +182,7 @@ def __init__(self, shared=None, options=None, options_values=None):
self.win_bash = None
self.conf = ConfDefinition().get_conanfile_conf(None)
- def run(self, command, win_bash=False, subsystem=None, env=None):
+ def run(self, command, win_bash=False, subsystem=None, env=None, ignore_errors=False):
assert win_bash is False
assert subsystem is None
self.command = command
| diff --git a/conans/requirements.txt b/conans/requirements.txt
index 2b43d530f41..3ee8a2a5719 100644
--- a/conans/requirements.txt
+++ b/conans/requirements.txt
@@ -7,7 +7,7 @@ patch-ng>=1.17.4, <1.18
fasteners>=0.14.1
six>=1.10.0,<=1.16.0
node-semver==0.6.1
-distro>=1.0.2, <=1.6.0
+distro>=1.0.2, <=1.6.0; sys_platform == 'linux' or sys_platform == 'linux2'
pygments>=2.0, <3.0
tqdm>=4.28.1, <5
Jinja2>=2.9, <4.0.0
| [
{
"components": [
{
"doc": "",
"lines": [
7,
121
],
"name": "_SystemPackageManagerTool",
"signature": "class _SystemPackageManagerTool(object):",
"type": "class"
},
{
"doc": "",
"lines": [
15,
... | [
"conans/test/functional/tools/system/package_manager_test.py::test_apt_check",
"conans/test/functional/tools/system/package_manager_test.py::test_build_require",
"conans/test/integration/tools/system/package_manager_test.py::test_package_manager_platform[Linux-apt-get]",
"conans/test/integration/tools/system/... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Redesign SystemPackageTool interface
Changelog: Feature: New tools in `conan.tools.system` for invoking system package managers in recipes.
Docs: https://github.com/conan-io/docs/pull/2379
To discuss:
- Check if we want to remove the guard to not running twice the system_requirements method. Add a conf for this?
- Taking into account build_requires
- Adding distro -> pm mapping
...
Closes: https://github.com/conan-io/conan/issues/1659
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/system/package_manager.py]
(definition of _SystemPackageManagerTool:)
class _SystemPackageManagerTool(object):
(definition of _SystemPackageManagerTool.__init__:)
def __init__(self, conanfile):
(definition of _SystemPackageManagerTool.get_default_tool:)
def get_default_tool(self):
(definition of _SystemPackageManagerTool.get_package_name:)
def get_package_name(self, package):
(definition of _SystemPackageManagerTool.sudo_str:)
def sudo_str(self):
(definition of _SystemPackageManagerTool.run:)
def run(self, method, *args, **kwargs):
(definition of _SystemPackageManagerTool.install:)
def install(self, *args, **kwargs):
(definition of _SystemPackageManagerTool.update:)
def update(self, *args, **kwargs):
(definition of _SystemPackageManagerTool.check:)
def check(self, *args, **kwargs):
(definition of _SystemPackageManagerTool._install:)
def _install(self, packages, update=False, check=True, **kwargs):
(definition of _SystemPackageManagerTool._update:)
def _update(self):
(definition of _SystemPackageManagerTool._check:)
def _check(self, packages):
(definition of _SystemPackageManagerTool.check_package:)
def check_package(self, package):
(definition of Apt:)
class Apt(_SystemPackageManagerTool):
(definition of Apt.__init__:)
def __init__(self, conanfile, arch_names=None):
(definition of Apt.install:)
def install(self, packages, update=False, check=False, recommends=False):
(definition of Yum:)
class Yum(_SystemPackageManagerTool):
(definition of Yum.__init__:)
def __init__(self, conanfile, arch_names=None):
(definition of Dnf:)
class Dnf(Yum):
(definition of Brew:)
class Brew(_SystemPackageManagerTool):
(definition of Pkg:)
class Pkg(_SystemPackageManagerTool):
(definition of PkgUtil:)
class PkgUtil(_SystemPackageManagerTool):
(definition of Chocolatey:)
class Chocolatey(_SystemPackageManagerTool):
(definition of PacMan:)
class PacMan(_SystemPackageManagerTool):
(definition of PacMan.__init__:)
def __init__(self, conanfile, arch_names=None):
(definition of Zypper:)
class Zypper(_SystemPackageManagerTool):
[end of new definitions in conan/tools/system/package_manager.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | |
Textualize__textual-214 | 214 | Textualize/textual | null | d7bcd0093809a8ef33db2cb905ca09c724afcc7f | 2022-01-18T17:33:59Z | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e2c607c2f5..e26fb347e6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,3 +11,4 @@ repos:
rev: 21.8b0
hooks:
- id: black
+ exclude: ^tests/
diff --git a/Makefile b/Makefile
index 80cde06d37..16489186e4 100644
--- a/Makefile
+++ b/Makefile
@@ -5,4 +5,4 @@ typecheck:
format:
black src
format-check:
- black --check .
+ black --check src
diff --git a/pyproject.toml b/pyproject.toml
index 0572f12afa..fdf571d96e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,7 +17,6 @@ classifiers = [
"Programming Language :: Python :: 3.10",
]
-
[tool.poetry.dependencies]
python = "^3.7"
rich = "^10.12.0"
@@ -25,7 +24,6 @@ rich = "^10.12.0"
typing-extensions = { version = "^3.10.0", python = "<3.8" }
[tool.poetry.dev-dependencies]
-
pytest = "^6.2.3"
black = "^21.11b1"
mypy = "^0.910"
@@ -35,6 +33,9 @@ mkdocstrings = "^0.15.2"
mkdocs-material = "^7.1.10"
pre-commit = "^2.13.0"
+[tool.black]
+includes = "src"
+
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
diff --git a/src/textual/_duration.py b/src/textual/_duration.py
new file mode 100644
index 0000000000..c7479167be
--- /dev/null
+++ b/src/textual/_duration.py
@@ -0,0 +1,47 @@
+import re
+
+_match_duration = re.compile(r"^(-?\d+\.?\d*)(s|ms)$").match
+
+
+class DurationError(Exception):
+ """
+ Exception indicating a general issue with a CSS duration.
+ """
+
+
+class DurationParseError(DurationError):
+ """
+ Indicates a malformed duration string that could not be parsed.
+ """
+
+
+def _duration_as_seconds(duration: str) -> float:
+ """
+ Args:
+ duration (str): A string of the form ``"2s"`` or ``"300ms"``, representing 2 seconds and
+ 300 milliseconds respectively. If no unit is supplied, e.g. ``"2"``, then the duration is
+ assumed to be in seconds.
+ Raises:
+ DurationParseError: If the argument ``duration`` is not a valid duration string.
+ Returns:
+ float: The duration in seconds.
+
+ """
+ match = _match_duration(duration)
+
+ if match:
+ value, unit_name = match.groups()
+ value = float(value)
+ if unit_name == "ms":
+ duration_secs = value / 1000
+ else:
+ duration_secs = value
+ else:
+ try:
+ duration_secs = float(duration)
+ except ValueError:
+ raise DurationParseError(
+ f"{duration!r} is not a valid duration."
+ ) from ValueError
+
+ return duration_secs
diff --git a/src/textual/css/_style_properties.py b/src/textual/css/_style_properties.py
index a1c42d2dd8..3ed32ee8be 100644
--- a/src/textual/css/_style_properties.py
+++ b/src/textual/css/_style_properties.py
@@ -55,7 +55,6 @@ def __get__(
def __set__(
self, obj: Styles, value: float | Scalar | str | None
) -> float | Scalar | str | None:
- new_value: Scalar | None = None
if value is None:
new_value = None
elif isinstance(value, float):
diff --git a/src/textual/css/_styles_builder.py b/src/textual/css/_styles_builder.py
index 560adf323b..17349a04f7 100644
--- a/src/textual/css/_styles_builder.py
+++ b/src/textual/css/_styles_builder.py
@@ -1,11 +1,3 @@
-"""
-
-The StylesBuilder object takes tokens parsed from the CSS and converts
-to the appropriate internal types.
-
-
-"""
-
from __future__ import annotations
from typing import cast, Iterable, NoReturn
@@ -17,6 +9,7 @@
from .constants import VALID_BORDER, VALID_EDGE, VALID_DISPLAY, VALID_VISIBILITY
from .errors import DeclarationError
from ._error_tools import friendly_list
+from .._duration import _duration_as_seconds
from .._easing import EASING
from ..geometry import Spacing, SpacingDimensions
from .model import Declaration
@@ -28,6 +21,11 @@
class StylesBuilder:
+ """
+ The StylesBuilder object takes tokens parsed from the CSS and converts
+ to the appropriate internal types.
+ """
+
def __init__(self) -> None:
self.styles = Styles()
@@ -240,14 +238,21 @@ def process_offset(self, name: str, tokens: list[Token], important: bool) -> Non
if not tokens:
return
if len(tokens) != 2:
- self.error(name, tokens[0], "expected two numbers in declaration")
+ self.error(
+ name, tokens[0], "expected two scalars or numbers in declaration"
+ )
else:
token1, token2 = tokens
- if token1.name != "scalar":
- self.error(name, token1, f"expected a scalar; found {token1.value!r}")
- if token2.name != "scalar":
- self.error(name, token2, f"expected a scalar; found {token1.value!r}")
+ if token1.name not in ("scalar", "number"):
+ self.error(
+ name, token1, f"expected a scalar or number; found {token1.value!r}"
+ )
+ if token2.name not in ("scalar", "number"):
+ self.error(
+ name, token2, f"expected a scalar or number; found {token2.value!r}"
+ )
+
scalar_x = Scalar.parse(token1.value, Unit.WIDTH)
scalar_y = Scalar.parse(token2.value, Unit.HEIGHT)
self.styles._rule_offset = ScalarOffset(scalar_x, scalar_y)
@@ -259,7 +264,7 @@ def process_offset_x(self, name: str, tokens: list[Token], important: bool) -> N
self.error(name, tokens[0], f"expected a single number")
else:
token = tokens[0]
- if token.name != "scalar":
+ if token.name not in ("scalar", "number"):
self.error(name, token, f"expected a scalar; found {token.value!r}")
x = Scalar.parse(token.value, Unit.WIDTH)
y = self.styles.offset.y
@@ -272,7 +277,7 @@ def process_offset_y(self, name: str, tokens: list[Token], important: bool) -> N
self.error(name, tokens[0], f"expected a single number")
else:
token = tokens[0]
- if token.name != "scalar":
+ if token.name not in ("scalar", "number"):
self.error(name, token, f"expected a scalar; found {token.value!r}")
y = Scalar.parse(token.value, Unit.HEIGHT)
x = self.styles.offset.x
@@ -394,15 +399,8 @@ def process_transition(
) -> None:
transitions: dict[str, Transition] = {}
- css_property = ""
- duration = 1.0
- easing = "linear"
- delay = 0.0
-
- iter_tokens = iter(tokens)
-
def make_groups() -> Iterable[list[Token]]:
- """Batch tokens in to comma-separated groups."""
+ """Batch tokens into comma-separated groups."""
group: list[Token] = []
for token in tokens:
if token.name == "comma":
@@ -414,6 +412,7 @@ def make_groups() -> Iterable[list[Token]]:
if group:
yield group
+ valid_duration_token_names = ("duration", "number")
for tokens in make_groups():
css_property = ""
duration = 1.0
@@ -425,13 +424,13 @@ def make_groups() -> Iterable[list[Token]]:
token = next(iter_tokens)
if token.name != "token":
self.error(name, token, "expected property")
- css_property = token.value
+ css_property = token.value
token = next(iter_tokens)
- if token.name != "scalar":
- self.error(name, token, "expected time")
+ if token.name not in valid_duration_token_names:
+ self.error(name, token, "expected duration or number")
try:
- duration = Scalar.parse(token.value).resolve_time()
+ duration = _duration_as_seconds(token.value)
except ScalarError as error:
self.error(name, token, str(error))
@@ -448,10 +447,10 @@ def make_groups() -> Iterable[list[Token]]:
easing = token.value
token = next(iter_tokens)
- if token.name != "scalar":
- self.error(name, token, "expected time")
+ if token.name not in valid_duration_token_names:
+ self.error(name, token, "expected duration or number")
try:
- delay = Scalar.parse(token.value).resolve_time()
+ delay = _duration_as_seconds(token.value)
except ScalarError as error:
self.error(name, token, str(error))
except StopIteration:
diff --git a/src/textual/css/parse.py b/src/textual/css/parse.py
index ba0bc67832..9062df7696 100644
--- a/src/textual/css/parse.py
+++ b/src/textual/css/parse.py
@@ -82,8 +82,6 @@ def parse_selectors(css_selectors: str) -> tuple[SelectorSet, ...]:
def parse_rule_set(tokens: Iterator[Token], token: Token) -> Iterable[RuleSet]:
- rule_set = RuleSet()
-
get_selector = SELECTOR_MAP.get
combinator: CombinatorType | None = CombinatorType.DESCENDENT
selectors: list[Selector] = []
@@ -187,8 +185,8 @@ def parse_declarations(css: str, path: str) -> Styles:
try:
styles_builder.add_declaration(declaration)
except DeclarationError as error:
- raise
errors.append((error.token, error.message))
+ raise
declaration = Declaration(token, "")
declaration.name = token.value.rstrip(":")
elif token_name == "declaration_set_end":
@@ -201,8 +199,8 @@ def parse_declarations(css: str, path: str) -> Styles:
try:
styles_builder.add_declaration(declaration)
except DeclarationError as error:
- raise
errors.append((error.token, error.message))
+ raise
return styles_builder.styles
@@ -257,9 +255,21 @@ def parse(css: str, path: str) -> Iterable[RuleSet]:
if __name__ == "__main__":
print(parse_selectors("Foo > Bar.baz { foo: bar"))
- CSS = """
-text: on red;
-docksX: main=top;
- """
-
- print(parse_declarations(CSS, "foo"))
+ css = """#something {
+ text: on red;
+ transition: offset 5.51s in_out_cubic;
+ offset-x: 100%;
+}
+"""
+
+ from textual.css.stylesheet import Stylesheet, StylesheetParseError
+ from rich.console import Console
+
+ console = Console()
+ stylesheet = Stylesheet()
+ try:
+ stylesheet.parse(css)
+ except StylesheetParseError as e:
+ console.print(e.errors)
+ print(stylesheet)
+ print(stylesheet.css)
diff --git a/src/textual/css/scalar.py b/src/textual/css/scalar.py
index 0f3c792e6e..eb5a52acc3 100644
--- a/src/textual/css/scalar.py
+++ b/src/textual/css/scalar.py
@@ -6,6 +6,7 @@
import rich.repr
+from textual.css.tokenizer import Token
from ..geometry import Offset
@@ -30,8 +31,6 @@ class Unit(Enum):
HEIGHT = 5
VIEW_WIDTH = 6
VIEW_HEIGHT = 7
- MILLISECONDS = 8
- SECONDS = 9
UNIT_SYMBOL = {
@@ -42,13 +41,11 @@ class Unit(Enum):
Unit.HEIGHT: "h",
Unit.VIEW_WIDTH: "vw",
Unit.VIEW_HEIGHT: "vh",
- Unit.MILLISECONDS: "ms",
- Unit.SECONDS: "s",
}
SYMBOL_UNIT = {v: k for k, v in UNIT_SYMBOL.items()}
-_MATCH_SCALAR = re.compile(r"^(\-?\d+\.?\d*)(fr|%|w|h|vw|vh|s|ms)?$").match
+_MATCH_SCALAR = re.compile(r"^(-?\d+\.?\d*)(fr|%|w|h|vw|vh)?$").match
RESOLVE_MAP = {
@@ -142,14 +139,6 @@ def resolve_dimension(
except KeyError:
raise ScalarResolveError(f"expected dimensions; found {str(self)!r}")
- def resolve_time(self) -> float:
- value, unit, _ = self
- if unit == Unit.MILLISECONDS:
- return value / 1000.0
- elif unit == Unit.SECONDS:
- return value
- raise ScalarResolveError(f"expected time; found {str(self)!r}")
-
@rich.repr.auto(angular=True)
class ScalarOffset(NamedTuple):
diff --git a/src/textual/css/stylesheet.py b/src/textual/css/stylesheet.py
index 77e4ab28ac..b7a5964919 100644
--- a/src/textual/css/stylesheet.py
+++ b/src/textual/css/stylesheet.py
@@ -3,9 +3,8 @@
from collections import defaultdict
from operator import itemgetter
import os
-from typing import Iterable, TYPE_CHECKING
+from typing import Iterable
-from rich.console import RenderableType
import rich.repr
from rich.highlighter import ReprHighlighter
from rich.panel import Panel
@@ -86,11 +85,11 @@ def read(self, filename: str) -> None:
css = css_file.read()
path = os.path.abspath(filename)
except Exception as error:
- raise StylesheetError(f"unable to read {filename!r}; {error}") from None
+ raise StylesheetError(f"unable to read {filename!r}; {error}")
try:
rules = list(parse(css, path))
except Exception as error:
- raise StylesheetError(f"failed to parse {filename!r}; {error}") from None
+ raise StylesheetError(f"failed to parse {filename!r}; {error}")
self.rules.extend(rules)
def parse(self, css: str, *, path: str = "") -> None:
diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py
index 80e57f5d21..2aae666e1a 100644
--- a/src/textual/css/tokenize.py
+++ b/src/textual/css/tokenize.py
@@ -1,10 +1,10 @@
from __future__ import annotations
+
+import pprint
import re
from typing import Iterable
-from rich import print
-
-from .tokenizer import Expect, Tokenizer, Token
+from textual.css.tokenizer import Expect, Tokenizer, Token
expect_selector = Expect(
@@ -51,7 +51,9 @@
declaration_end=r"\n|;",
whitespace=r"\s+",
comment_start=r"\/\*",
- scalar=r"\-?\d+\.?\d*(?:fr|%|w|h|vw|vh|s|ms)?",
+ scalar=r"\-?\d+\.?\d*(?:fr|%|w|h|vw|vh)",
+ duration=r"\d+\.?\d*(?:ms|s)",
+ number=r"\-?\d+\.?\d*",
color=r"\#[0-9a-fA-F]{6}|color\([0-9]{1,3}\)|rgb\(\d{1,3}\,\s?\d{1,3}\,\s?\d{1,3}\)",
key_value=r"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\-\/]+",
token="[a-zA-Z_-]+",
@@ -124,3 +126,13 @@ class DeclarationTokenizerState(TokenizerState):
# break
# expect = get_state(name, expect)
# yield token
+
+if __name__ == "__main__":
+ css = """#something {
+ text: on red;
+ offset-x: 10;
+ }
+ """
+ # transition: offset 500 in_out_cubic;
+ tokens = tokenize(css, __name__)
+ pprint.pp(list(tokens))
| diff --git a/tests/test_css_parse.py b/tests/test_css_parse.py
new file mode 100644
index 0000000000..f17a0c3e94
--- /dev/null
+++ b/tests/test_css_parse.py
@@ -0,0 +1,129 @@
+import pytest
+from rich.color import Color, ColorType
+
+from textual.css.scalar import Scalar, Unit
+from textual.css.stylesheet import Stylesheet, StylesheetParseError
+from textual.css.transition import Transition
+
+
+class TestParseText:
+ def test_foreground(self):
+ css = """#some-widget {
+ text: green;
+ }
+ """
+ stylesheet = Stylesheet()
+ stylesheet.parse(css)
+
+ styles = stylesheet.rules[0].styles
+ assert styles.text_color == Color.parse("green")
+
+ def test_background(self):
+ css = """#some-widget {
+ text: on red;
+ }
+ """
+ stylesheet = Stylesheet()
+ stylesheet.parse(css)
+
+ styles = stylesheet.rules[0].styles
+ assert styles.text_background == Color("red", type=ColorType.STANDARD, number=1)
+
+
+class TestParseOffset:
+ @pytest.mark.parametrize("offset_x, parsed_x, offset_y, parsed_y", [
+ ["-5.5%", Scalar(-5.5, Unit.PERCENT, Unit.WIDTH), "-30%", Scalar(-30, Unit.PERCENT, Unit.HEIGHT)],
+ ["5%", Scalar(5, Unit.PERCENT, Unit.WIDTH), "40%", Scalar(40, Unit.PERCENT, Unit.HEIGHT)],
+ ["10", Scalar(10, Unit.CELLS, Unit.WIDTH), "40", Scalar(40, Unit.CELLS, Unit.HEIGHT)],
+ ])
+ def test_composite_rule(self, offset_x, parsed_x, offset_y, parsed_y):
+ css = f"""#some-widget {{
+ offset: {offset_x} {offset_y};
+ }}
+ """
+ stylesheet = Stylesheet()
+ stylesheet.parse(css)
+
+ styles = stylesheet.rules[0].styles
+
+ assert len(stylesheet.rules) == 1
+ assert stylesheet.rules[0].errors == []
+ assert styles.offset.x == parsed_x
+ assert styles.offset.y == parsed_y
+
+ @pytest.mark.parametrize("offset_x, parsed_x, offset_y, parsed_y", [
+ ["-5.5%", Scalar(-5.5, Unit.PERCENT, Unit.WIDTH), "-30%", Scalar(-30, Unit.PERCENT, Unit.HEIGHT)],
+ ["5%", Scalar(5, Unit.PERCENT, Unit.WIDTH), "40%", Scalar(40, Unit.PERCENT, Unit.HEIGHT)],
+ ["-10", Scalar(-10, Unit.CELLS, Unit.WIDTH), "40", Scalar(40, Unit.CELLS, Unit.HEIGHT)],
+ ])
+ def test_separate_rules(self, offset_x, parsed_x, offset_y, parsed_y):
+ css = f"""#some-widget {{
+ offset-x: {offset_x};
+ offset-y: {offset_y};
+ }}
+ """
+ stylesheet = Stylesheet()
+ stylesheet.parse(css)
+
+ styles = stylesheet.rules[0].styles
+
+ assert len(stylesheet.rules) == 1
+ assert stylesheet.rules[0].errors == []
+ assert styles.offset.x == parsed_x
+ assert styles.offset.y == parsed_y
+
+
+class TestParseTransition:
+ @pytest.mark.parametrize(
+ "duration, parsed_duration", [
+ ["5.57s", 5.57],
+ ["0.5s", 0.5],
+ ["1200ms", 1.2],
+ ["0.5ms", 0.0005],
+ ["20", 20.],
+ ["0.1", 0.1],
+ ]
+ )
+ def test_various_duration_formats(self, duration, parsed_duration):
+ easing = "in_out_cubic"
+ transition_property = "offset"
+ css = f"""#some-widget {{
+ transition: {transition_property} {duration} {easing} {duration};
+ }}
+ """
+ stylesheet = Stylesheet()
+ stylesheet.parse(css)
+
+ styles = stylesheet.rules[0].styles
+
+ assert len(stylesheet.rules) == 1
+ assert stylesheet.rules[0].errors == []
+ assert styles.transitions == {
+ "offset": Transition(duration=parsed_duration, easing=easing, delay=parsed_duration)
+ }
+
+ def test_no_delay_specified(self):
+ css = f"#some-widget {{ transition: offset-x 1 in_out_cubic; }}"
+ stylesheet = Stylesheet()
+ stylesheet.parse(css)
+
+ styles = stylesheet.rules[0].styles
+
+ assert stylesheet.rules[0].errors == []
+ assert styles.transitions == {
+ "offset-x": Transition(duration=1, easing="in_out_cubic", delay=0)
+ }
+
+ def test_unknown_easing_function(self):
+ invalid_func_name = "invalid_easing_function"
+ css = f"#some-widget {{ transition: offset 1 {invalid_func_name} 1; }}"
+
+ stylesheet = Stylesheet()
+ with pytest.raises(StylesheetParseError) as ex:
+ stylesheet.parse(css)
+
+ stylesheet_errors = stylesheet.rules[0].errors
+
+ assert len(stylesheet_errors) == 1
+ assert stylesheet_errors[0][0].value == invalid_func_name
+ assert ex.value.errors is not None
| diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e2c607c2f5..e26fb347e6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,3 +11,4 @@ repos:
rev: 21.8b0
hooks:
- id: black
+ exclude: ^tests/
diff --git a/Makefile b/Makefile
index 80cde06d37..16489186e4 100644
--- a/Makefile
+++ b/Makefile
@@ -5,4 +5,4 @@ typecheck:
format:
black src
format-check:
- black --check .
+ black --check src
diff --git a/pyproject.toml b/pyproject.toml
index 0572f12afa..fdf571d96e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,7 +17,6 @@ classifiers = [
"Programming Language :: Python :: 3.10",
]
-
[tool.poetry.dependencies]
python = "^3.7"
rich = "^10.12.0"
@@ -25,7 +24,6 @@ rich = "^10.12.0"
typing-extensions = { version = "^3.10.0", python = "<3.8" }
[tool.poetry.dev-dependencies]
-
pytest = "^6.2.3"
black = "^21.11b1"
mypy = "^0.910"
@@ -35,6 +33,9 @@ mkdocstrings = "^0.15.2"
mkdocs-material = "^7.1.10"
pre-commit = "^2.13.0"
+[tool.black]
+includes = "src"
+
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
| [
{
"components": [
{
"doc": "Exception indicating a general issue with a CSS duration.",
"lines": [
6,
7
],
"name": "DurationError",
"signature": "class DurationError(Exception):",
"type": "class"
},
{
"doc": "Indicates... | [
"tests/test_css_parse.py::TestParseTransition::test_various_duration_formats[20-20.0]",
"tests/test_css_parse.py::TestParseTransition::test_various_duration_formats[0.1-0.1]",
"tests/test_css_parse.py::TestParseTransition::test_no_delay_specified",
"tests/test_css_parse.py::TestParseTransition::test_unknown_e... | [
"tests/test_css_parse.py::TestParseText::test_foreground",
"tests/test_css_parse.py::TestParseText::test_background",
"tests/test_css_parse.py::TestParseOffset::test_composite_rule[-5.5%-parsed_x0--30%-parsed_y0]",
"tests/test_css_parse.py::TestParseOffset::test_composite_rule[5%-parsed_x1-40%-parsed_y1]",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Splitting out parsing of durations into new token types, avoiding Scalar
{}
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/textual/_duration.py]
(definition of DurationError:)
class DurationError(Exception):
"""Exception indicating a general issue with a CSS duration."""
(definition of DurationParseError:)
class DurationParseError(DurationError):
"""Indicates a malformed duration string that could not be parsed."""
(definition of _duration_as_seconds:)
def _duration_as_seconds(duration: str) -> float:
"""Args:
duration (str): A string of the form ``"2s"`` or ``"300ms"``, representing 2 seconds and
300 milliseconds respectively. If no unit is supplied, e.g. ``"2"``, then the duration is
assumed to be in seconds.
Raises:
DurationParseError: If the argument ``duration`` is not a valid duration string.
Returns:
float: The duration in seconds."""
[end of new definitions in src/textual/_duration.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 86e93536b991014e0ea4bf993068202b446bb698 | |
sympy__sympy-22872 | 22,872 | sympy/sympy | 1.11 | e57dc59d377af3692fcc890876eb49b4f89f3679 | 2022-01-17T13:44:36Z | diff --git a/.mailmap b/.mailmap
index edf341362f5a..f64a28e9c044 100644
--- a/.mailmap
+++ b/.mailmap
@@ -64,8 +64,6 @@
*Marc-Etienne M.Leveille <protonyc@gmail.com>
*Ulrich Hecht <ulrich.hecht@gmail.com>
2torus <boris.ettinger@gmail.com>
-<felix.kaiser@fxkr.net> <kaiser.fx@gmail.com>
-<fredrik.johansson@gmail.com> <fredrik@airy.(none)>
Aadit Kamat <aadit.k12@gmail.com>
Aaditya Nair <aadityanair6494@gmail.com>
Aaron Meurer <asmeurer@gmail.com>
@@ -173,7 +171,6 @@ Andrey Lekar <andrey_lekar@adoriasoft.com> blackyblack <andrey_lekar@adoriasoft.
Andy R. Terrel <aterrel@uchicago.edu> <andy.terrel@gmail.com>
Andy R. Terrel <aterrel@uchicago.edu> <aterrel@enthought.com>
Andy R. Terrel <aterrel@uchicago.edu> <aterrel@uchicago.edu>
-Andy R. Terrel <aterrel@uchicago.edu> <aterrel@uchicago.edu>
Angad Sandhu <55819847+angadsinghsandhu@users.noreply.github.com>
Angadh Nanjangud <angadh.n@gmail.com>
Angus Griffith <16sn6uv@gmail.com>
@@ -400,6 +397,7 @@ Faisal Anees <faisal.iiit@gmail.com>
Faisal Riyaz <faisalriyaz011@gmail.com>
Fawaz Alazemi <Mba7eth@gmail.com>
Felix Kaiser <felix.kaiser@fxkr.net> <felix.kaiser@fxkr.net>
+Felix Kaiser <felix.kaiser@fxkr.net> <kaiser.fx@gmail.com>
Felix Kaiser <felix.kaiser@fxkr.net> <whatfxkr@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Fermi Paradox <FermiParadox@users.noreply.github.com>
@@ -413,7 +411,9 @@ Francesco Bonazzi <franz.bonazzi@gmail.com> <none@universe.org>
Freddie Witherden <freddie@witherden.org>
Fredrik Andersson <fredrik.andersson@fcc.chalmers.se>
Fredrik Eriksson <freeriks@student.chalmers.se>
-Fredrik Johansson <fredrik.johansson@gmail.com>
+Fredrik Johansson <fredrik.johansson@gmail.com> <fredrik.johansson@gmail.com>
+Fredrik Johansson <fredrik.johansson@gmail.com> <fredrik.johansson@gmail.com> <fredrik@airy.(none)>
+Fredrik Johansson <fredrik.johansson@gmail.com> <fredrik@airy.(none)>
Fredrik Johansson <fredrik.johansson@gmail.com> fredrik.johansson <devnull@localhost>
Friedrich Hagedorn <friedrich_h@gmx.de>
Frédéric Chapoton <fchapoton2@gmail.com> fchapoton <fchapoton2@gmail.com>
@@ -1024,6 +1024,7 @@ Sebastian Krause <sebastian.krause@gmx.de>
Sebastian Kreft <skreft@gmail.com>
Sebastian Krämer <basti.kr@gmail.com> basti.kr <devnull@localhost>
Segev Finer <segev208@gmail.com>
+Sergey B Kirpichev <skirpichev@gmail.com>
Sergey Pestov <pestov-sa@yandex.ru>
Sergiu Ivanov <unlimitedscolobb@gmail.com>
Seshagiri Prabhu <seshagiriprabhu@gmail.com>
@@ -1057,6 +1058,8 @@ Shubham Tibra <shubh.tibra@gmail.com>
Siddhanathan Shanmugam <siddhanathan@gmail.com>
Siddhanathan Shanmugam <siddhanathan@gmail.com> <siddhu@siddhu-laptop.(none)>
Siddhant Jain <getsiddhant@gmail.com>
+Siddhant Jain <siddhantashoknagar@gmail.com> Siddhant Jain <77455093+me-t1me@users.noreply.github.com>
+Siddhant Jain <siddhantashoknagar@gmail.com> me-t1me <siddhantashoknagar@gmail.com>
Sidhant Nagpal <sidhantnagpal97@gmail.com> Sidhant Nagpal <36465988+sidhantnagpal@users.noreply.github.com>
Sidhant Nagpal <sidhantnagpal97@gmail.com> sidhantnagpal <sidhantnagpal97@gmail.com>
Sidhant Nagpal <sidhantnagpal97@gmail.com> “sidhantnagpal” <sidhantnagpal97@gmail.com>
diff --git a/doc/src/modules/core.rst b/doc/src/modules/core.rst
index 0ade07ed6df3..72bf23cb9f77 100644
--- a/doc/src/modules/core.rst
+++ b/doc/src/modules/core.rst
@@ -519,6 +519,11 @@ Tuple
.. autoclass:: Tuple
:members:
+TupleKind
+^^^^^^^^^
+.. autoclass:: TupleKind
+ :members:
+
Dict
^^^^
.. autoclass:: Dict
@@ -550,6 +555,11 @@ NumberKind
.. autoclass:: NumberKind
:members:
+UndefinedKind
+^^^^^^^^^^^^^
+.. autoclass:: UndefinedKind
+ :members:
+
BooleanKind
^^^^^^^^^^^
.. autoclass:: BooleanKind
diff --git a/doc/src/modules/matrices/common.rst b/doc/src/modules/matrices/common.rst
index 5b0d358749d9..0b0226d9e455 100644
--- a/doc/src/modules/matrices/common.rst
+++ b/doc/src/modules/matrices/common.rst
@@ -9,3 +9,7 @@ MatrixCommon Class Reference
:members:
:special-members:
:inherited-members:
+
+MatrixKind
+----------
+.. autoclass:: MatrixKind
\ No newline at end of file
diff --git a/doc/src/modules/sets.rst b/doc/src/modules/sets.rst
index 0bfd37c81864..5d105f942d19 100644
--- a/doc/src/modules/sets.rst
+++ b/doc/src/modules/sets.rst
@@ -58,6 +58,11 @@ SymmetricDifference
.. autoclass:: SymmetricDifference
:members:
+DisjointUnion
+^^^^^^^^^^^^^
+.. autoclass:: DisjointUnion
+ :members:
+
Singleton Sets
--------------
@@ -75,6 +80,11 @@ Special Sets
------------
.. automodule:: sympy.sets.fancysets
+Rationals
+^^^^^^^^^
+.. autoclass:: Rationals
+ :members:
+
Naturals
^^^^^^^^
.. autoclass:: Naturals
@@ -174,3 +184,9 @@ Relations on sets
.. autoclass:: Contains
:members:
+
+SetKind
+--------------
+
+.. autoclass:: SetKind
+ :members:
\ No newline at end of file
diff --git a/sympy/core/containers.py b/sympy/core/containers.py
index aabcedfa5bf1..34c389b7d6cd 100644
--- a/sympy/core/containers.py
+++ b/sympy/core/containers.py
@@ -13,6 +13,7 @@
from .basic import Basic
from .sorting import default_sort_key, ordered
from .sympify import _sympify, sympify, _sympy_converter, SympifyError
+from sympy.core.kind import Kind
from sympy.utilities.iterables import iterable
from sympy.utilities.misc import as_int
@@ -144,6 +145,32 @@ def index(self, value, start=None, stop=None):
else:
return self.args.index(value, start, stop)
+ @property
+ def kind(self):
+ """
+ The kind of a Tuple instance.
+
+ The kind of a Tuple is always of :class:`TupleKind` but
+ parametrised by the number of elements and the kind of each element.
+
+ Examples
+ ========
+
+ >>> from sympy import Tuple, Matrix
+ >>> Tuple(1, 2).kind
+ TupleKind(NumberKind, NumberKind)
+ >>> Tuple(Matrix([1, 2]), 1).kind
+ TupleKind(MatrixKind(NumberKind), NumberKind)
+ >>> Tuple(1, 2).kind.element_kind
+ (NumberKind, NumberKind)
+
+ See Also
+ ========
+
+ sympy.matrices.common.MatrixKind
+ sympy.core.kind.NumberKind
+ """
+ return TupleKind(*(i.kind for i in self.args))
_sympy_converter[tuple] = lambda tup: Tuple(*tup)
@@ -351,3 +378,41 @@ def difference(self, other):
def update(self, iterable):
for val in iterable:
self.add(val)
+
+class TupleKind(Kind):
+ """
+ TupleKind is a subclass of Kind, which is used to define Kind of ``Tuple``.
+
+ Parameters of TupleKind will be kinds of all the arguments in Tuples, for
+ example
+
+ Parameters
+ ==========
+
+ args : tuple(element_kind)
+ element_kind is kind of element.
+ args is tuple of kinds of element
+
+ Examples
+ ========
+
+ >>> from sympy import Tuple
+ >>> Tuple(1, 2).kind
+ TupleKind(NumberKind, NumberKind)
+ >>> Tuple(1, 2).kind.element_kind
+ (NumberKind, NumberKind)
+
+ See Also
+ ========
+
+ sympy.core.kind.NumberKind
+ MatrixKind
+ sympy.sets.sets.SetKind
+ """
+ def __new__(cls, *args):
+ obj = super().__new__(cls, *args)
+ obj.element_kind = args
+ return obj
+
+ def __repr__(self):
+ return "TupleKind{}".format(self.element_kind)
diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py
index 1a11aca938c9..65d1876443ad 100644
--- a/sympy/matrices/common.py
+++ b/sympy/matrices/common.py
@@ -3149,13 +3149,14 @@ class MatrixKind(Kind):
==========
element_kind : Kind
- Kind of the element. Default is :obj:NumberKind `<sympy.core.kind.NumberKind>`,
+ Kind of the element. Default is
+ :class:`sympy.core.kind.NumberKind`,
which means that the matrix contains only numbers.
Examples
========
- Any instance of matrix class has ``MatrixKind``.
+ Any instance of matrix class has ``MatrixKind``:
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol('A', 2,2)
@@ -3163,7 +3164,7 @@ class MatrixKind(Kind):
MatrixKind(NumberKind)
Although expression representing a matrix may be not instance of
- matrix class, it will have ``MatrixKind`` as well.
+ matrix class, it will have ``MatrixKind`` as well:
>>> from sympy import MatrixExpr, Integral
>>> from sympy.abc import x
@@ -3173,8 +3174,8 @@ class MatrixKind(Kind):
>>> intM.kind
MatrixKind(NumberKind)
- Use ``isinstance()`` to check for ``MatrixKind` without specifying
- the element kind. Use ``is`` with specifying the element kind.
+ Use ``isinstance()`` to check for ``MatrixKind`` without specifying
+ the element kind. Use ``is`` with specifying the element kind:
>>> from sympy import Matrix
>>> from sympy.core import NumberKind
@@ -3188,7 +3189,10 @@ class MatrixKind(Kind):
See Also
========
- shape : Function to return the shape of objects with ``MatrixKind``.
+ sympy.core.kind.NumberKind
+ sympy.core.kind.UndefinedKind
+ sympy.core.containers.TupleKind
+ sympy.sets.sets.SetKind
"""
def __new__(cls, element_kind=NumberKind):
diff --git a/sympy/matrices/expressions/sets.py b/sympy/matrices/expressions/sets.py
index 2f2a3101b68f..90816c684def 100644
--- a/sympy/matrices/expressions/sets.py
+++ b/sympy/matrices/expressions/sets.py
@@ -1,7 +1,9 @@
from sympy.core.assumptions import check_assumptions
from sympy.core.logic import fuzzy_and
from sympy.core.sympify import _sympify
-from sympy.sets.sets import Set
+from sympy.matrices.common import MatrixKind
+from sympy.sets.sets import Set, SetKind
+from sympy.core.kind import NumberKind
from .matexpr import MatrixExpr
@@ -60,3 +62,6 @@ def _check_dim(cls, dim):
raise ValueError(
"The dimension specification {} should be "
"a nonnegative integer.".format(dim))
+
+ def _kind(self):
+ return SetKind(MatrixKind(NumberKind))
diff --git a/sympy/sets/conditionset.py b/sympy/sets/conditionset.py
index fe24bb08631a..1036b29d664a 100644
--- a/sympy/sets/conditionset.py
+++ b/sympy/sets/conditionset.py
@@ -10,7 +10,7 @@
from sympy.utilities.iterables import sift, flatten, has_dups
from sympy.utilities.exceptions import SymPyDeprecationWarning
from .contains import Contains
-from .sets import Set, Union, FiniteSet
+from .sets import Set, Union, FiniteSet, SetKind
adummy = Dummy('conditionset')
@@ -231,3 +231,6 @@ def _eval_subs(self, old, new):
else:
pass # let error about the symbol raise from __new__
return self.func(sym, cond, base)
+
+ def _kind(self):
+ return SetKind(self.sym.kind)
diff --git a/sympy/sets/fancysets.py b/sympy/sets/fancysets.py
index 40d61f031676..cf3e5c67928e 100644
--- a/sympy/sets/fancysets.py
+++ b/sympy/sets/fancysets.py
@@ -9,11 +9,12 @@
from sympy.core.mod import Mod
from sympy.core.numbers import oo, igcd, Rational
from sympy.core.relational import Eq, is_eq
+from sympy.core.kind import NumberKind
from sympy.core.singleton import Singleton, S
from sympy.core.symbol import Dummy, symbols, Symbol
from sympy.core.sympify import _sympify, sympify, _sympy_converter
from sympy.logic.boolalg import And, Or
-from .sets import Set, Interval, Union, FiniteSet, ProductSet
+from .sets import Set, Interval, Union, FiniteSet, ProductSet, SetKind
from sympy.utilities.misc import filldedent
@@ -62,6 +63,9 @@ def __iter__(self):
def _boundary(self):
return S.Reals
+ def _kind(self):
+ return SetKind(NumberKind)
+
class Naturals(Set, metaclass=Singleton):
"""
@@ -126,6 +130,9 @@ def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), x >= self.inf, x < oo)
+ def _kind(self):
+ return SetKind(NumberKind)
+
class Naturals0(Naturals):
"""Represents the whole numbers which are all the non-negative integers,
@@ -214,6 +221,9 @@ def _sup(self):
def _boundary(self):
return self
+ def _kind(self):
+ return SetKind(NumberKind)
+
def as_relational(self, x):
from sympy.functions.elementary.integers import floor
return And(Eq(floor(x), x), -oo < x, x < oo)
@@ -501,6 +511,9 @@ def doit(self, **kwargs):
return FiniteSet(*(f(*a) for a in product(*self.base_sets)))
return self
+ def _kind(self):
+ return SetKind(self.lamda.expr.kind)
+
class Range(Set):
"""
@@ -691,6 +704,9 @@ def reversed(self):
return self.func(
self.stop - self.step, self.start - self.step, -self.step)
+ def _kind(self):
+ return SetKind(NumberKind)
+
def _contains(self, other):
if self.start == self.stop:
return S.false
@@ -1310,6 +1326,9 @@ def _measure(self):
"""
return self.sets._measure
+ def _kind(self):
+ return self.args[0].kind
+
@classmethod
def from_real(cls, sets):
"""
diff --git a/sympy/sets/powerset.py b/sympy/sets/powerset.py
index b576471e649d..2eb3b41b9859 100644
--- a/sympy/sets/powerset.py
+++ b/sympy/sets/powerset.py
@@ -4,7 +4,7 @@
from sympy.core.singleton import S
from sympy.core.sympify import _sympify
-from .sets import Set, FiniteSet
+from .sets import Set, FiniteSet, SetKind
class PowerSet(Set):
@@ -113,3 +113,7 @@ def __iter__(self):
yield new
temp.append(new)
found.extend(temp)
+
+ @property
+ def kind(self):
+ return SetKind(self.arg.kind)
diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py
index 60f37903100a..67b4d42da424 100644
--- a/sympy/sets/sets.py
+++ b/sympy/sets/sets.py
@@ -3,8 +3,9 @@
from collections import defaultdict
import inspect
+from sympy.core.kind import Kind, UndefinedKind, NumberKind
from sympy.core.basic import Basic
-from sympy.core.containers import Tuple
+from sympy.core.containers import Tuple, TupleKind
from sympy.core.decorators import sympify_method_args, sympify_return
from sympy.core.evalf import EvalfMixin
from sympy.core.expr import Expr
@@ -534,6 +535,91 @@ def measure(self):
"""
return self._measure
+ @property
+ def kind(self):
+ """
+ The kind of a Set
+
+ Explanation
+ ===========
+
+ Any :class:`Set` will have kind :class:`SetKind` which is
+ parametrised by the kind of the elements of the set. For example
+ most sets are sets of numbers and will have kind
+ ``SetKind(NumberKind)``. If elements of sets are different in kind than
+ their kind will ``SetKind(UndefinedKind)``. See
+ :class:`sympy.core.kind.Kind` for an explanation of the kind system.
+
+ Examples
+ ========
+
+ >>> from sympy import Interval, Matrix, FiniteSet, EmptySet, ProductSet, PowerSet
+
+ >>> FiniteSet(Matrix([1, 2])).kind
+ SetKind(MatrixKind(NumberKind))
+
+ >>> Interval(1, 2).kind
+ SetKind(NumberKind)
+
+ >>> EmptySet.kind
+ SetKind()
+
+ A :class:`sympy.sets.powerset.PowerSet` is a set of sets:
+
+ >>> PowerSet({1, 2, 3}).kind
+ SetKind(SetKind(NumberKind))
+
+ A :class:`ProductSet` represents the set of tuples of elements of
+ other sets. Its kind is :class:`sympy.core.containers.TupleKind`
+ parametrised by the kinds of the elements of those sets:
+
+ >>> p = ProductSet(FiniteSet(1, 2), FiniteSet(3, 4))
+ >>> list(p)
+ [(1, 3), (2, 3), (1, 4), (2, 4)]
+ >>> p.kind
+ SetKind(TupleKind(NumberKind, NumberKind))
+
+ When all elements of the set do not have same kind, the kind
+ will be returned as ``SetKind(UndefinedKind)``:
+
+ >>> FiniteSet(0, Matrix([1, 2])).kind
+ SetKind(UndefinedKind)
+
+ The kind of the elements of a set are given by the ``element_kind``
+ attribute of ``SetKind``:
+
+ >>> Interval(1, 2).kind.element_kind
+ NumberKind
+
+ See Also
+ ========
+
+ NumberKind
+ sympy.core.kind.UndefinedKind
+ sympy.core.containers.TupleKind
+ MatrixKind
+ sympy.matrices.expressions.sets.MatrixSet
+ sympy.sets.conditionset.ConditionSet
+ Rationals
+ Naturals
+ Integers
+ sympy.sets.fancysets.ImageSet
+ sympy.sets.fancysets.Range
+ sympy.sets.fancysets.ComplexRegion
+ sympy.sets.powerset.PowerSet
+ sympy.sets.sets.ProductSet
+ sympy.sets.sets.Interval
+ sympy.sets.sets.Union
+ sympy.sets.sets.Intersection
+ sympy.sets.sets.Complement
+ sympy.sets.sets.EmptySet
+ sympy.sets.sets.UniversalSet
+ sympy.sets.sets.FiniteSet
+ sympy.sets.sets.SymmetricDifference
+ sympy.sets.sets.DisjointUnion
+ """
+ return self._kind()
+
@property
def boundary(self):
"""
@@ -652,6 +738,9 @@ def _boundary(self):
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
+ def _kind(self):
+ return SetKind(UndefinedKind)
+
def _eval_evalf(self, prec):
dps = prec_to_dps(prec)
return self.func(*[arg.evalf(n=dps) for arg in self.args])
@@ -867,6 +956,9 @@ def _measure(self):
measure *= s.measure
return measure
+ def _kind(self):
+ return SetKind(TupleKind(*(i.kind.element_kind for i in self.args)))
+
def __len__(self):
return reduce(lambda a, b: a*b, (len(s) for s in self.args))
@@ -1120,6 +1212,9 @@ def as_relational(self, x):
def _measure(self):
return self.end - self.start
+ def _kind(self):
+ return SetKind(NumberKind)
+
def to_mpi(self, prec=53):
return mpi(mpf(self.start._eval_evalf(prec)),
mpf(self.end._eval_evalf(prec)))
@@ -1281,6 +1376,15 @@ def _measure(self):
parity *= -1
return measure
+ def _kind(self):
+ kinds = tuple(arg.kind for arg in self.args if arg is not S.EmptySet)
+ if not kinds:
+ return SetKind()
+ elif all(i == kinds[0] for i in kinds):
+ return kinds[0]
+ else:
+ return SetKind(UndefinedKind)
+
@property
def _boundary(self):
def boundary_of_set(i):
@@ -1384,6 +1488,15 @@ def is_finite_set(self):
if fuzzy_or(arg.is_finite_set for arg in self.args):
return True
+ def _kind(self):
+ kinds = tuple(arg.kind for arg in self.args if arg is not S.UniversalSet)
+ if not kinds:
+ return SetKind(UndefinedKind)
+ elif all(i == kinds[0] for i in kinds):
+ return kinds[0]
+ else:
+ return SetKind()
+
@property
def _inf(self):
raise NotImplementedError()
@@ -1605,6 +1718,9 @@ def as_relational(self, symbol):
return And(A_rel, B_rel)
+ def _kind(self):
+ return self.args[0].kind
+
@property
def is_iterable(self):
if self.args[0].is_iterable:
@@ -1689,6 +1805,9 @@ def _boundary(self):
def _complement(self, other):
return other
+ def _kind(self):
+ return SetKind()
+
def _symmetric_difference(self, other):
return other
@@ -1733,6 +1852,9 @@ def _symmetric_difference(self, other):
def _measure(self):
return S.Infinity
+ def _kind(self):
+ return SetKind(UndefinedKind)
+
def _contains(self, other):
return true
@@ -1904,6 +2026,14 @@ def _sup(self):
def measure(self):
return 0
+ def _kind(self):
+ if not self.args:
+ return SetKind()
+ elif all(i.kind == self.args[0].kind for i in self.args):
+ return SetKind(self.args[0].kind)
+ else:
+ return SetKind(UndefinedKind)
+
def __len__(self):
return len(self.args)
@@ -2154,6 +2284,14 @@ def _contains(self, element):
return element[0] in self.sets[element[1]]
+ def _kind(self):
+ if not self.args:
+ return SetKind()
+ elif all(i.kind == self.args[0].kind for i in self.args):
+ return self.args[0].kind
+ else:
+ return SetKind(UndefinedKind)
+
def __iter__(self):
if self.is_iterable:
@@ -2446,7 +2584,6 @@ def simplify_intersection(args):
other_sets = args + [s.args[0]]
return Complement(Intersection(*other_sets), s.args[1])
-
from sympy.sets.handlers.intersection import intersection_sets
# At this stage we are guaranteed not to have any
@@ -2488,6 +2625,7 @@ def _handle_finite_sets(op, x, y, commutative):
else:
return None
+
def _apply_operation(op, x, y, commutative):
from .fancysets import ImageSet
d = Dummy('d')
@@ -2508,26 +2646,80 @@ def _apply_operation(op, x, y, commutative):
out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y)
return out
+
def set_add(x, y):
from sympy.sets.handlers.add import _set_add
return _apply_operation(_set_add, x, y, commutative=True)
+
def set_sub(x, y):
from sympy.sets.handlers.add import _set_sub
return _apply_operation(_set_sub, x, y, commutative=False)
+
def set_mul(x, y):
from sympy.sets.handlers.mul import _set_mul
return _apply_operation(_set_mul, x, y, commutative=True)
+
def set_div(x, y):
from sympy.sets.handlers.mul import _set_div
return _apply_operation(_set_div, x, y, commutative=False)
+
def set_pow(x, y):
from sympy.sets.handlers.power import _set_pow
return _apply_operation(_set_pow, x, y, commutative=False)
+
def set_function(f, x):
from sympy.sets.handlers.functions import _set_function
return _set_function(f, x)
+
+
+class SetKind(Kind):
+ """
+ SetKind is kind for all Sets
+
+ Every instance of Set will have kind ``SetKind`` parametrised by the kind
+ of the elements of the ``Set``. The kind of the elements might be
+ ``NumberKind``, or ``TupleKind`` or something else. When not all elements
+ have the same kind then the kind of the elements will be given as
+ ``UndefinedKind``.
+
+ Parameters
+ ==========
+
+ element_kind: Kind (optional)
+ The kind of the elements of the set. In a well defined set all elements
+ will have the same kind. Otherwise the kind should
+ :class:`sympy.core.kind.UndefinedKind`. The ``element_kind`` argument is optional but
+ should only be omitted in the case of ``EmptySet`` whose kind is simply
+ ``SetKind()``
+
+ Examples
+ ========
+
+ >>> from sympy import Interval
+ >>> Interval(1, 2).kind
+ SetKind(NumberKind)
+ >>> Interval(1,2).kind.element_kind
+ NumberKind
+
+ See Also
+ ========
+
+ sympy.core.kind.NumberKind
+ sympy.matrices.common.MatrixKind
+ sympy.core.containers.TupleKind
+ """
+ def __new__(cls, element_kind=None):
+ obj = super().__new__(cls, element_kind)
+ obj.element_kind = element_kind
+ return obj
+
+ def __repr__(self):
+ if not self.element_kind:
+ return "SetKind()"
+ else:
+ return "SetKind(%s)" % self.element_kind
| diff --git a/sympy/core/tests/test_containers.py b/sympy/core/tests/test_containers.py
index 0863618ea831..daa1be40a9c4 100644
--- a/sympy/core/tests/test_containers.py
+++ b/sympy/core/tests/test_containers.py
@@ -3,12 +3,14 @@
from sympy.core.basic import Basic
from sympy.core.containers import (Dict, Tuple)
from sympy.core.numbers import Integer
+from sympy.core.kind import NumberKind
+from sympy.matrices.common import MatrixKind
from sympy.core.singleton import S
from sympy.core.symbol import symbols
from sympy.core.sympify import sympify
from sympy.matrices.dense import Matrix
from sympy.sets.sets import FiniteSet
-from sympy.core.containers import tuple_wrapper
+from sympy.core.containers import tuple_wrapper, TupleKind
from sympy.core.expr import unchanged
from sympy.core.function import Function, Lambda
from sympy.core.relational import Eq
@@ -150,6 +152,13 @@ def test_iterable_is_sequence():
assert all(iterable(i, exclude=None) for i in not_sympy_iterable)
+def test_TupleKind():
+ kind = TupleKind(NumberKind, MatrixKind(NumberKind))
+ assert Tuple(1, Matrix([1, 2])).kind is kind
+ assert Tuple(1, 2).kind is TupleKind(NumberKind, NumberKind)
+ assert Tuple(1, 2).kind.element_kind == (NumberKind, NumberKind)
+
+
def test_Dict():
x, y, z = symbols('x y z')
d = Dict({x: 1, y: 2, z: 3})
diff --git a/sympy/matrices/expressions/tests/test_sets.py b/sympy/matrices/expressions/tests/test_sets.py
index a01221d5506c..a4205eceeaa0 100644
--- a/sympy/matrices/expressions/tests/test_sets.py
+++ b/sympy/matrices/expressions/tests/test_sets.py
@@ -5,6 +5,9 @@
from sympy.matrices.expressions.sets import MatrixSet
from sympy.matrices.expressions.special import ZeroMatrix
from sympy.testing.pytest import raises
+from sympy.sets.sets import SetKind
+from sympy.matrices.common import MatrixKind
+from sympy.core.kind import NumberKind
def test_MatrixSet():
@@ -33,3 +36,7 @@ def test_MatrixSet():
raises(ValueError, lambda: MatrixSet(2, -2, S.Reals))
raises(ValueError, lambda: MatrixSet(2.4, -1, S.Reals))
raises(TypeError, lambda: MatrixSet(2, 2, (1, 2, 3)))
+
+
+def test_SetKind_MatrixSet():
+ assert MatrixSet(2, 2, set=S.Reals).kind is SetKind(MatrixKind(NumberKind))
diff --git a/sympy/sets/tests/test_conditionset.py b/sympy/sets/tests/test_conditionset.py
index 1a544f9d0bd6..4338bca6361a 100644
--- a/sympy/sets/tests/test_conditionset.py
+++ b/sympy/sets/tests/test_conditionset.py
@@ -1,8 +1,10 @@
from sympy.core.expr import unchanged
from sympy.sets import (ConditionSet, Intersection, FiniteSet,
EmptySet, Union, Contains, ImageSet)
+from sympy.sets.sets import SetKind
from sympy.core.function import (Function, Lambda)
from sympy.core.mod import Mod
+from sympy.core.kind import NumberKind
from sympy.core.numbers import (oo, pi)
from sympy.core.relational import (Eq, Ne)
from sympy.core.singleton import S
@@ -264,6 +266,7 @@ def test_as_relational():
assert ConditionSet(x, x > 1, S.Integers).as_relational(x
) == Contains(x, S.Integers) & (x > 1)
+
def test_flatten():
"""Tests whether there is basic denesting functionality"""
inner = ConditionSet(x, sin(x) + x > 0)
@@ -278,8 +281,14 @@ def test_flatten():
outer = ConditionSet(x, Contains(x, inner), S.Reals)
assert outer == ConditionSet(x, sin(x) + x > 0, Interval(-1, 1))
+
def test_duplicate():
from sympy.core.function import BadSignatureError
# test coverage for line 95 in conditionset.py, check for duplicates in symbols
dup = symbols('a,a')
raises(BadSignatureError, lambda: ConditionSet(dup, x < 0))
+
+
+def test_SetKind_ConditionSet():
+ assert ConditionSet(x, Eq(sin(x), 0), Interval(0, 2*pi)).kind is SetKind(NumberKind)
+ assert ConditionSet(x, x < 0).kind is SetKind(NumberKind)
diff --git a/sympy/sets/tests/test_fancysets.py b/sympy/sets/tests/test_fancysets.py
index 4f670726294f..c92ea3fe99f7 100644
--- a/sympy/sets/tests/test_fancysets.py
+++ b/sympy/sets/tests/test_fancysets.py
@@ -4,12 +4,13 @@
from sympy.sets.fancysets import (ImageSet, Range, normalize_theta_set,
ComplexRegion)
from sympy.sets.sets import (FiniteSet, Interval, Union, imageset,
- Intersection, ProductSet)
+ Intersection, ProductSet, SetKind)
from sympy.sets.conditionset import ConditionSet
from sympy.simplify.simplify import simplify
from sympy.core.basic import Basic
-from sympy.core.containers import Tuple
+from sympy.core.containers import Tuple, TupleKind
from sympy.core.function import Lambda
+from sympy.core.kind import NumberKind
from sympy.core.numbers import (I, Rational, oo, pi)
from sympy.core.relational import Eq
from sympy.core.singleton import S
@@ -1125,6 +1126,20 @@ def test_union_RealSubSet():
assert (S.Complexes).union(S.Integers) == S.Complexes
+def test_SetKind_fancySet():
+ G = lambda *args: ImageSet(Lambda(x, x ** 2), *args)
+ assert G(Interval(1, 4)).kind is SetKind(NumberKind)
+ assert G(FiniteSet(1, 4)).kind is SetKind(NumberKind)
+ assert S.Rationals.kind is SetKind(NumberKind)
+ assert S.Naturals.kind is SetKind(NumberKind)
+ assert S.Integers.kind is SetKind(NumberKind)
+ assert Range(3).kind is SetKind(NumberKind)
+ a = Interval(2, 3)
+ b = Interval(4, 6)
+ c1 = ComplexRegion(a*b)
+ assert c1.kind is SetKind(TupleKind(NumberKind, NumberKind))
+
+
def test_issue_9980():
c1 = ComplexRegion(Interval(1, 2)*Interval(2, 3))
c2 = ComplexRegion(Interval(1, 5)*Interval(1, 3))
diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py
index a8014523a645..a48240580585 100644
--- a/sympy/sets/tests/test_sets.py
+++ b/sympy/sets/tests/test_sets.py
@@ -1,6 +1,8 @@
from sympy.concrete.summations import Sum
from sympy.core.add import Add
+from sympy.core.containers import TupleKind
from sympy.core.function import Lambda
+from sympy.core.kind import NumberKind, UndefinedKind
from sympy.core.numbers import (Float, I, Rational, nan, oo, pi, zoo)
from sympy.core.power import Pow
from sympy.core.singleton import S
@@ -10,11 +12,12 @@
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (cos, sin)
from sympy.logic.boolalg import (false, true)
+from sympy.matrices.common import MatrixKind
from sympy.matrices.dense import Matrix
from sympy.polys.rootoftools import rootof
from sympy.sets.contains import Contains
from sympy.sets.fancysets import (ImageSet, Range)
-from sympy.sets.sets import (Complement, DisjointUnion, FiniteSet, Intersection, Interval, ProductSet, Set, SymmetricDifference, Union, imageset)
+from sympy.sets.sets import (Complement, DisjointUnion, FiniteSet, Intersection, Interval, ProductSet, Set, SymmetricDifference, Union, imageset, SetKind)
from mpmath import mpi
from sympy.core.expr import unchanged
@@ -1597,6 +1600,54 @@ def test_DisjointUnion_len():
assert len(DisjointUnion(S.EmptySet, S.EmptySet, FiniteSet(x, y, z), S.EmptySet)) == 3
raises(ValueError, lambda: len(DisjointUnion(Interval(0, 1), S.EmptySet)))
+def test_SetKind_ProductSet():
+ p = ProductSet(FiniteSet(Matrix([1, 2])), FiniteSet(Matrix([1, 2])))
+ mk = MatrixKind(NumberKind)
+ k = SetKind(TupleKind(mk, mk))
+ assert p.kind is k
+ assert ProductSet(Interval(1, 2), FiniteSet(Matrix([1, 2]))).kind is SetKind(TupleKind(NumberKind, mk))
+
+def test_SetKind_Interval():
+ assert Interval(1, 2).kind is SetKind(NumberKind)
+
+def test_SetKind_EmptySet_UniversalSet():
+ assert S.UniversalSet.kind is SetKind(UndefinedKind)
+ assert EmptySet.kind is SetKind()
+
+def test_SetKind_FiniteSet():
+ assert FiniteSet(1, Matrix([1, 2])).kind is SetKind(UndefinedKind)
+ assert FiniteSet(1, 2).kind is SetKind(NumberKind)
+
+def test_SetKind_Unions():
+ assert Union(FiniteSet(Matrix([1, 2])), Interval(1, 2)).kind is SetKind(UndefinedKind)
+ assert Union(Interval(1, 2), Interval(1, 7)).kind is SetKind(NumberKind)
+
+def test_SetKind_DisjointUnion():
+ A = FiniteSet(1, 2, 3)
+ B = Interval(0, 5)
+ assert DisjointUnion(A, B).kind is SetKind(NumberKind)
+
+def test_SetKind_evaluate_False():
+ U = lambda *args: Union(*args, evaluate=False)
+ assert U({1}, EmptySet).kind is SetKind(NumberKind)
+ assert U(Interval(1, 2), EmptySet).kind is SetKind(NumberKind)
+ assert U({1}, S.UniversalSet).kind is SetKind(UndefinedKind)
+ assert U(Interval(1, 2), Interval(4, 5),
+ FiniteSet(1)).kind is SetKind(NumberKind)
+ I = lambda *args: Intersection(*args, evaluate=False)
+ assert I({1}, S.UniversalSet).kind is SetKind(NumberKind)
+ assert I({1}, EmptySet).kind is SetKind()
+ C = lambda *args: Complement(*args, evaluate=False)
+ assert C(S.UniversalSet, {1, 2, 4, 5}).kind is SetKind(UndefinedKind)
+ assert C({1, 2, 3, 4, 5}, EmptySet).kind is SetKind(NumberKind)
+ assert C(EmptySet, {1, 2, 3, 4, 5}).kind is SetKind()
+
+def test_SetKind_ImageSet_Special():
+ f = ImageSet(Lambda(n, n ** 2), Interval(1, 4))
+ assert (f - FiniteSet(3)).kind is SetKind(NumberKind)
+ assert (f + Interval(16, 17)).kind is SetKind(NumberKind)
+ assert (f + FiniteSet(17)).kind is SetKind(NumberKind)
+
def test_issue_20089():
B = FiniteSet(FiniteSet(1, 2), FiniteSet(1))
assert 1 not in B
| diff --git a/.mailmap b/.mailmap
index edf341362f5a..f64a28e9c044 100644
--- a/.mailmap
+++ b/.mailmap
@@ -64,8 +64,6 @@
*Marc-Etienne M.Leveille <protonyc@gmail.com>
*Ulrich Hecht <ulrich.hecht@gmail.com>
2torus <boris.ettinger@gmail.com>
-<felix.kaiser@fxkr.net> <kaiser.fx@gmail.com>
-<fredrik.johansson@gmail.com> <fredrik@airy.(none)>
Aadit Kamat <aadit.k12@gmail.com>
Aaditya Nair <aadityanair6494@gmail.com>
Aaron Meurer <asmeurer@gmail.com>
@@ -173,7 +171,6 @@ Andrey Lekar <andrey_lekar@adoriasoft.com> blackyblack <andrey_lekar@adoriasoft.
Andy R. Terrel <aterrel@uchicago.edu> <andy.terrel@gmail.com>
Andy R. Terrel <aterrel@uchicago.edu> <aterrel@enthought.com>
Andy R. Terrel <aterrel@uchicago.edu> <aterrel@uchicago.edu>
-Andy R. Terrel <aterrel@uchicago.edu> <aterrel@uchicago.edu>
Angad Sandhu <55819847+angadsinghsandhu@users.noreply.github.com>
Angadh Nanjangud <angadh.n@gmail.com>
Angus Griffith <16sn6uv@gmail.com>
@@ -400,6 +397,7 @@ Faisal Anees <faisal.iiit@gmail.com>
Faisal Riyaz <faisalriyaz011@gmail.com>
Fawaz Alazemi <Mba7eth@gmail.com>
Felix Kaiser <felix.kaiser@fxkr.net> <felix.kaiser@fxkr.net>
+Felix Kaiser <felix.kaiser@fxkr.net> <kaiser.fx@gmail.com>
Felix Kaiser <felix.kaiser@fxkr.net> <whatfxkr@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Fermi Paradox <FermiParadox@users.noreply.github.com>
@@ -413,7 +411,9 @@ Francesco Bonazzi <franz.bonazzi@gmail.com> <none@universe.org>
Freddie Witherden <freddie@witherden.org>
Fredrik Andersson <fredrik.andersson@fcc.chalmers.se>
Fredrik Eriksson <freeriks@student.chalmers.se>
-Fredrik Johansson <fredrik.johansson@gmail.com>
+Fredrik Johansson <fredrik.johansson@gmail.com> <fredrik.johansson@gmail.com>
+Fredrik Johansson <fredrik.johansson@gmail.com> <fredrik.johansson@gmail.com> <fredrik@airy.(none)>
+Fredrik Johansson <fredrik.johansson@gmail.com> <fredrik@airy.(none)>
Fredrik Johansson <fredrik.johansson@gmail.com> fredrik.johansson <devnull@localhost>
Friedrich Hagedorn <friedrich_h@gmx.de>
Frédéric Chapoton <fchapoton2@gmail.com> fchapoton <fchapoton2@gmail.com>
@@ -1024,6 +1024,7 @@ Sebastian Krause <sebastian.krause@gmx.de>
Sebastian Kreft <skreft@gmail.com>
Sebastian Krämer <basti.kr@gmail.com> basti.kr <devnull@localhost>
Segev Finer <segev208@gmail.com>
+Sergey B Kirpichev <skirpichev@gmail.com>
Sergey Pestov <pestov-sa@yandex.ru>
Sergiu Ivanov <unlimitedscolobb@gmail.com>
Seshagiri Prabhu <seshagiriprabhu@gmail.com>
@@ -1057,6 +1058,8 @@ Shubham Tibra <shubh.tibra@gmail.com>
Siddhanathan Shanmugam <siddhanathan@gmail.com>
Siddhanathan Shanmugam <siddhanathan@gmail.com> <siddhu@siddhu-laptop.(none)>
Siddhant Jain <getsiddhant@gmail.com>
+Siddhant Jain <siddhantashoknagar@gmail.com> Siddhant Jain <77455093+me-t1me@users.noreply.github.com>
+Siddhant Jain <siddhantashoknagar@gmail.com> me-t1me <siddhantashoknagar@gmail.com>
Sidhant Nagpal <sidhantnagpal97@gmail.com> Sidhant Nagpal <36465988+sidhantnagpal@users.noreply.github.com>
Sidhant Nagpal <sidhantnagpal97@gmail.com> sidhantnagpal <sidhantnagpal97@gmail.com>
Sidhant Nagpal <sidhantnagpal97@gmail.com> “sidhantnagpal” <sidhantnagpal97@gmail.com>
diff --git a/doc/src/modules/core.rst b/doc/src/modules/core.rst
index 0ade07ed6df3..72bf23cb9f77 100644
--- a/doc/src/modules/core.rst
+++ b/doc/src/modules/core.rst
@@ -519,6 +519,11 @@ Tuple
.. autoclass:: Tuple
:members:
+TupleKind
+^^^^^^^^^
+.. autoclass:: TupleKind
+ :members:
+
Dict
^^^^
.. autoclass:: Dict
@@ -550,6 +555,11 @@ NumberKind
.. autoclass:: NumberKind
:members:
+UndefinedKind
+^^^^^^^^^^^^^
+.. autoclass:: UndefinedKind
+ :members:
+
BooleanKind
^^^^^^^^^^^
.. autoclass:: BooleanKind
diff --git a/doc/src/modules/matrices/common.rst b/doc/src/modules/matrices/common.rst
index 5b0d358749d9..0b0226d9e455 100644
--- a/doc/src/modules/matrices/common.rst
+++ b/doc/src/modules/matrices/common.rst
@@ -9,3 +9,7 @@ MatrixCommon Class Reference
:members:
:special-members:
:inherited-members:
+
+MatrixKind
+----------
+.. autoclass:: MatrixKind
\ No newline at end of file
diff --git a/doc/src/modules/sets.rst b/doc/src/modules/sets.rst
index 0bfd37c81864..5d105f942d19 100644
--- a/doc/src/modules/sets.rst
+++ b/doc/src/modules/sets.rst
@@ -58,6 +58,11 @@ SymmetricDifference
.. autoclass:: SymmetricDifference
:members:
+DisjointUnion
+^^^^^^^^^^^^^
+.. autoclass:: DisjointUnion
+ :members:
+
Singleton Sets
--------------
@@ -75,6 +80,11 @@ Special Sets
------------
.. automodule:: sympy.sets.fancysets
+Rationals
+^^^^^^^^^
+.. autoclass:: Rationals
+ :members:
+
Naturals
^^^^^^^^
.. autoclass:: Naturals
@@ -174,3 +184,9 @@ Relations on sets
.. autoclass:: Contains
:members:
+
+SetKind
+--------------
+
+.. autoclass:: SetKind
+ :members:
\ No newline at end of file
| [
{
"components": [
{
"doc": "The kind of a Tuple instance.\n\nThe kind of a Tuple is always of :class:`TupleKind` but\nparametrised by the number of elements and the kind of each element.\n\nExamples\n========\n\n>>> from sympy import Tuple, Matrix\n>>> Tuple(1, 2).kind\nTupleKind(NumberKind, Numbe... | [
"test_Tuple",
"test_Tuple_contains",
"test_Tuple_concatenation",
"test_Tuple_equality",
"test_Tuple_Eq",
"test_Tuple_comparision",
"test_Tuple_tuple_count",
"test_Tuple_index",
"test_Tuple_mul",
"test_tuple_wrapper",
"test_iterable_is_sequence",
"test_TupleKind",
"test_Dict",
"test_MatrixS... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Created SetKind for Set class #22860
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
Issue #22860
#### Brief description of what is fixed or changed
Creates class `SetKind` for all subclasses of `Set`, such as `Interval`, `FiniteSet`, `Union`, etc.
```python
>>> Interval(1,2).kind
SetKind(NumberKind)
>>> FiniteSet(Matrix([1, 2])).kind
SetKind(MatrixKind(NumberKind))
>>> FiniteSet(0, Matrix([1, 2])).kind
SetKind(UndefinedKind)
```
#### Other comments
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
NO ENTRY
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/core/containers.py]
(definition of Tuple.kind:)
def kind(self):
"""The kind of a Tuple instance.
The kind of a Tuple is always of :class:`TupleKind` but
parametrised by the number of elements and the kind of each element.
Examples
========
>>> from sympy import Tuple, Matrix
>>> Tuple(1, 2).kind
TupleKind(NumberKind, NumberKind)
>>> Tuple(Matrix([1, 2]), 1).kind
TupleKind(MatrixKind(NumberKind), NumberKind)
>>> Tuple(1, 2).kind.element_kind
(NumberKind, NumberKind)
See Also
========
sympy.matrices.common.MatrixKind
sympy.core.kind.NumberKind"""
(definition of TupleKind:)
class TupleKind(Kind):
"""TupleKind is a subclass of Kind, which is used to define Kind of ``Tuple``.
Parameters of TupleKind will be kinds of all the arguments in Tuples, for
example
Parameters
==========
args : tuple(element_kind)
element_kind is kind of element.
args is tuple of kinds of element
Examples
========
>>> from sympy import Tuple
>>> Tuple(1, 2).kind
TupleKind(NumberKind, NumberKind)
>>> Tuple(1, 2).kind.element_kind
(NumberKind, NumberKind)
See Also
========
sympy.core.kind.NumberKind
MatrixKind
sympy.sets.sets.SetKind"""
(definition of TupleKind.__new__:)
def __new__(cls, *args):
(definition of TupleKind.__repr__:)
def __repr__(self):
[end of new definitions in sympy/core/containers.py]
[start of new definitions in sympy/matrices/expressions/sets.py]
(definition of MatrixSet._kind:)
def _kind(self):
[end of new definitions in sympy/matrices/expressions/sets.py]
[start of new definitions in sympy/sets/conditionset.py]
(definition of ConditionSet._kind:)
def _kind(self):
[end of new definitions in sympy/sets/conditionset.py]
[start of new definitions in sympy/sets/fancysets.py]
(definition of Rationals._kind:)
def _kind(self):
(definition of Naturals._kind:)
def _kind(self):
(definition of Integers._kind:)
def _kind(self):
(definition of ImageSet._kind:)
def _kind(self):
(definition of Range._kind:)
def _kind(self):
(definition of ComplexRegion._kind:)
def _kind(self):
[end of new definitions in sympy/sets/fancysets.py]
[start of new definitions in sympy/sets/powerset.py]
(definition of PowerSet.kind:)
def kind(self):
[end of new definitions in sympy/sets/powerset.py]
[start of new definitions in sympy/sets/sets.py]
(definition of Set.kind:)
def kind(self):
"""The kind of a Set
Explanation
===========
Any :class:`Set` will have kind :class:`SetKind` which is
parametrised by the kind of the elements of the set. For example
most sets are sets of numbers and will have kind
``SetKind(NumberKind)``. If elements of sets are different in kind than
their kind will ``SetKind(UndefinedKind)``. See
:class:`sympy.core.kind.Kind` for an explanation of the kind system.
Examples
========
>>> from sympy import Interval, Matrix, FiniteSet, EmptySet, ProductSet, PowerSet
>>> FiniteSet(Matrix([1, 2])).kind
SetKind(MatrixKind(NumberKind))
>>> Interval(1, 2).kind
SetKind(NumberKind)
>>> EmptySet.kind
SetKind()
A :class:`sympy.sets.powerset.PowerSet` is a set of sets:
>>> PowerSet({1, 2, 3}).kind
SetKind(SetKind(NumberKind))
A :class:`ProductSet` represents the set of tuples of elements of
other sets. Its kind is :class:`sympy.core.containers.TupleKind`
parametrised by the kinds of the elements of those sets:
>>> p = ProductSet(FiniteSet(1, 2), FiniteSet(3, 4))
>>> list(p)
[(1, 3), (2, 3), (1, 4), (2, 4)]
>>> p.kind
SetKind(TupleKind(NumberKind, NumberKind))
When all elements of the set do not have same kind, the kind
will be returned as ``SetKind(UndefinedKind)``:
>>> FiniteSet(0, Matrix([1, 2])).kind
SetKind(UndefinedKind)
The kind of the elements of a set are given by the ``element_kind``
attribute of ``SetKind``:
>>> Interval(1, 2).kind.element_kind
NumberKind
See Also
========
NumberKind
sympy.core.kind.UndefinedKind
sympy.core.containers.TupleKind
MatrixKind
sympy.matrices.expressions.sets.MatrixSet
sympy.sets.conditionset.ConditionSet
Rationals
Naturals
Integers
sympy.sets.fancysets.ImageSet
sympy.sets.fancysets.Range
sympy.sets.fancysets.ComplexRegion
sympy.sets.powerset.PowerSet
sympy.sets.sets.ProductSet
sympy.sets.sets.Interval
sympy.sets.sets.Union
sympy.sets.sets.Intersection
sympy.sets.sets.Complement
sympy.sets.sets.EmptySet
sympy.sets.sets.UniversalSet
sympy.sets.sets.FiniteSet
sympy.sets.sets.SymmetricDifference
sympy.sets.sets.DisjointUnion"""
(definition of Set._kind:)
def _kind(self):
(definition of ProductSet._kind:)
def _kind(self):
(definition of Interval._kind:)
def _kind(self):
(definition of Union._kind:)
def _kind(self):
(definition of Intersection._kind:)
def _kind(self):
(definition of Complement._kind:)
def _kind(self):
(definition of EmptySet._kind:)
def _kind(self):
(definition of UniversalSet._kind:)
def _kind(self):
(definition of FiniteSet._kind:)
def _kind(self):
(definition of DisjointUnion._kind:)
def _kind(self):
(definition of SetKind:)
class SetKind(Kind):
"""SetKind is kind for all Sets
Every instance of Set will have kind ``SetKind`` parametrised by the kind
of the elements of the ``Set``. The kind of the elements might be
``NumberKind``, or ``TupleKind`` or something else. When not all elements
have the same kind then the kind of the elements will be given as
``UndefinedKind``.
Parameters
==========
element_kind: Kind (optional)
The kind of the elements of the set. In a well defined set all elements
will have the same kind. Otherwise the kind should
:class:`sympy.core.kind.UndefinedKind`. The ``element_kind`` argument is optional but
should only be omitted in the case of ``EmptySet`` whose kind is simply
``SetKind()``
Examples
========
>>> from sympy import Interval
>>> Interval(1, 2).kind
SetKind(NumberKind)
>>> Interval(1,2).kind.element_kind
NumberKind
See Also
========
sympy.core.kind.NumberKind
sympy.matrices.common.MatrixKind
sympy.core.containers.TupleKind"""
(definition of SetKind.__new__:)
def __new__(cls, element_kind=None):
(definition of SetKind.__repr__:)
def __repr__(self):
[end of new definitions in sympy/sets/sets.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | edf24253833ca153cb6d29ae54092ecebe29614c | |
conan-io__conan-10310 | 10,310 | conan-io/conan | null | f73ada4ca23bfc0c65c944af0b8acb450fe68111 | 2022-01-11T12:08:21Z | diff --git a/conan/tools/microsoft/__init__.py b/conan/tools/microsoft/__init__.py
index 1c0422e0070..155beb03d31 100644
--- a/conan/tools/microsoft/__init__.py
+++ b/conan/tools/microsoft/__init__.py
@@ -1,5 +1,5 @@
from conan.tools.microsoft.toolchain import MSBuildToolchain
from conan.tools.microsoft.msbuild import MSBuild
from conan.tools.microsoft.msbuilddeps import MSBuildDeps
-from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars
+from conan.tools.microsoft.visual import msvc_runtime_flag, VCVars, is_msvc
from conan.tools.microsoft.subsystems import subsystem_path
diff --git a/conan/tools/microsoft/visual.py b/conan/tools/microsoft/visual.py
index 6f758fb650e..9dbdc5986fa 100644
--- a/conan/tools/microsoft/visual.py
+++ b/conan/tools/microsoft/visual.py
@@ -172,3 +172,12 @@ def _vcvars_vers(conanfile, compiler, vs_version):
# The equivalent of compiler 192 is toolset 14.2
vcvars_ver = "14.{}".format(compiler_version[-1])
return vcvars_ver
+
+
+def is_msvc(conanfile):
+ """ Validate if current compiler in setttings is 'Visual Studio' or 'msvc'
+ :param conanfile: ConanFile instance
+ :return: True, if the host compiler is related to Visual Studio, otherwise, False.
+ """
+ settings = conanfile.settings
+ return settings.get_safe("compiler") in ["Visual Studio", "msvc"]
| diff --git a/conans/test/unittests/tools/microsoft/test_msbuild.py b/conans/test/unittests/tools/microsoft/test_msbuild.py
index 78f0cb959c3..f6aab1c5dff 100644
--- a/conans/test/unittests/tools/microsoft/test_msbuild.py
+++ b/conans/test/unittests/tools/microsoft/test_msbuild.py
@@ -4,7 +4,7 @@
import pytest
from mock import Mock
-from conan.tools.microsoft import MSBuild, MSBuildToolchain
+from conan.tools.microsoft import MSBuild, MSBuildToolchain, is_msvc
from conans.model.conf import ConfDefinition, Conf
from conans.model.env_info import EnvValues
from conans.test.utils.mocks import ConanFileMock, MockSettings
@@ -172,3 +172,20 @@ def test_msbuild_and_intel_cc_props(mode, expected_toolset):
props_file = os.path.join(test_folder, 'conantoolchain_release_x64.props')
msbuild.generate()
assert '<PlatformToolset>%s</PlatformToolset>' % expected_toolset in load(props_file)
+
+
+@pytest.mark.parametrize("compiler,expected", [
+ ("Visual Studio", True),
+ ("msvc", True),
+ ("clang", False)
+])
+def test_is_msvc(compiler, expected):
+ settings = Settings({"build_type": ["Release"],
+ "compiler": {compiler: {"version": ["2022"]}},
+ "os": ["Windows"],
+ "arch": ["x86_64"]})
+ conanfile = ConanFile(Mock(), None)
+ conanfile.settings = "os", "compiler", "build_type", "arch"
+ conanfile.initialize(settings, EnvValues())
+ conanfile.settings.compiler = compiler
+ assert is_msvc(conanfile) == expected
| [
{
"components": [
{
"doc": "Validate if current compiler in setttings is 'Visual Studio' or 'msvc'\n:param conanfile: ConanFile instance\n:return: True, if the host compiler is related to Visual Studio, otherwise, False.",
"lines": [
177,
183
],
"name": ... | [
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_cpu_count",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_toolset",
"conans/test/unittests/tools/microsoft/test_msbuild.py::test_msbuild_toolset_for_intel_cc[icx-Intel",
"conans/test/unittests/tools/microsoft/test_msbu... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Validate if settings.compiler is related to Visual Studio
Add a new helper to simplify and avoid method copies in Conan Center Index
Changelog: Feature: Add `is_msvc` to validate if `settings.compiler` is `Visual Studio` and `msvc` compilers.
Docs: https://github.com/conan-io/docs/pull/2353
closes #10306
- [x] Refer to the issue that supports this Pull Request.
- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.
- [x] I've read the [Contributing guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
- [x] I've followed the PEP8 style guides for Python code.
- [x] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one.
<sup>**Note:** By default this PR will skip the slower tests and will use a limited set of python versions. Check [here](https://github.com/conan-io/conan/blob/develop/.github/PR_INCREASE_TESTING.md) how to increase the testing level by writing some tags in the current PR body text.</sup>
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/microsoft/visual.py]
(definition of is_msvc:)
def is_msvc(conanfile):
"""Validate if current compiler in setttings is 'Visual Studio' or 'msvc'
:param conanfile: ConanFile instance
:return: True, if the host compiler is related to Visual Studio, otherwise, False."""
[end of new definitions in conan/tools/microsoft/visual.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
[feature] Tool: is_msvc helper
Our friend and great contributor SpaceIm is using a new detection which I believe could be part of the mainstream:
```python
@property
def _is_msvc(self):
return str(self.settings.compiler) in ["Visual Studio", "msvc"]
```
This property can be largely re-used, when checking on `validate()` or any other condition.
- [ ] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
----------
Hi @uilianries
Yes, makes sense, specially to try to have clean recipes for migrating to 2.0 (only msvc will remain there).
Do you want to contribute this? I guess it will be a tool in ``conan.tools.build``?
@memsharded Yes, I can do it of course.
> I guess it will be a tool in conan.tools.build?
Build so far is only related to CPU, however, [visual.py](https://github.com/conan-io/conan/blob/develop/conan/tools/microsoft/visual.py) has all Visual Studio information there. I would suggest `conan.tools.visual` in this case.
> Build so far is only related to CPU, however, visual.py has all Visual Studio information there. I would suggest conan.tools.visual in this case.
``conan.tools.visual`` is not public/documented. In any case it should be ``conan.tools.microsoft``. Yes, it makes sense too. I was thinking where all the possible configuration checks would be, like we don't know if it is "microsoft" yet, we are checking, and this was a generic "check", but I agree, it will feel cleaner in ``conan.tools.microsoft``.
I think we also need a helper(s) for compiler runtime:
https://github.com/conan-io/conan-center-index/pull/8801/files#diff-d67d36ebb633f3e920a34cd24c6c92ac12ab88b1f1db0a694841133faeb10fe5R783-R791
```
@property
def _runtime(self):
if self.settings.compiler == "Visual Studio":
return self.settings.compiler.runtime
else:
return "M{}{}".format(
"T" if self.settings.compiler.runtime == "static" else "D",
"d" if self.settings.compiler.runtime_type == "Debug" else "",
)
```
it's pretty widespread among CCI recipes already
(and one more helper could return runtime in a form of MultiThreaded[Debug][Dll], which is also frequently needed)
``msvc_runtime_flag()`` is already in ``visual.py`` and made available via ``conan.tools.microsoft``
--------------------
</issues> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | |
Textualize__rich-1816 | 1,816 | Textualize/rich | null | d65c3bd53da234a4047c469aa7740e03d1a622f7 | 2022-01-07T15:04:11Z | diff --git a/CHANGELOG.md b/CHANGELOG.md
index abe01b9d04..5350b9f386 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,13 +5,16 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
## [11.0.0] - Unreleased
### Added
-- Fixed issue with pretty repr in jupyter notebook https://github.com/Textualize/rich/issues/1717
- Added max_depth arg to pretty printing
+- Added `vertical_align` to Table.add_row
+
+### Fixed
+
+- Fixed issue with pretty repr in jupyter notebook https://github.com/Textualize/rich/issues/1717
- Fix Traceback theme defaults override user supplied styles https://github.com/Textualize/rich/issues/1786
## [10.16.2] - 2021-01-02
diff --git a/docs/source/tables.rst b/docs/source/tables.rst
index 2603e7912a..967a573825 100644
--- a/docs/source/tables.rst
+++ b/docs/source/tables.rst
@@ -40,7 +40,44 @@ This produces the following output:
</pre>
-Rich is quite smart about rendering the table. It will adjust the column widths to fit the contents and will wrap text if it doesn't fit. You can also add anything that Rich knows how to render as a title or row cell (even another table)!
+Rich will calculate the optimal column sizes to fit your content, and will wrap text to fit if the terminal is not wide enough to fit the contents.
+
+.. note::
+ You are not limited to adding text in the ``add_row`` method. You can add anything that Rich knows how to render (including another table).
+
+Table Options
+~~~~~~~~~~~~~
+
+There are a number of keyword arguments on the Table constructor you can use to define how a table should look.
+
+- ``title`` Sets the title of the table (text show above the table).
+- ``caption`` Sets the table caption (text show below the table).
+- ``width`` Sets the desired width of the table (disables automatic width calculation).
+- ``min_width`` Sets a minimum width for the table.
+- ``box`` Sets one of the :ref:`appendix_box` styles for the table grid, or ``None`` for no grid.
+- ``safe_box`` Set to ``True`` to force the table to generate ASCII characters rather than unicode.
+- ``padding`` A integer, or tuple of 1, 2, or 4 values to set the padding on cells.
+- ``collapse_padding`` If True the padding of neighboring cells will be merged.
+- ``pad_edge`` Set to False to remove padding around the edge of the table.
+- ``expand`` Set to True to expand the table to the full available size.
+- ``show_header`` Set to True to show a header, False to disable it.
+- ``show_footer`` Set to True to show a footer, False to disable it.
+- ``show edge`` Set to False to disable the edge line around the table.
+- ``show_lines`` Set to True to show lines between rows as well as header / footer.
+- ``leading`` Additional space between rows.
+- ``style`` A Style to apply to the entire table, e.g. "on blue"
+- ``row_styles`` Set to a list of styles to style alternating rows. e.g. ``["dim", ""]`` to create *zebra stripes*
+- ``header_style`` Set the default style for the header.
+- ``footer_style`` Set the default style for the footer.
+- ``border style`` Set a style for border characters.
+- ``title_style`` Set a style for the title.
+- ``caption_style`` Set a style for the caption.
+- ``title_justify`` Set the title justify method ("left", "right", "center", or "full")
+- ``caption_justify`` Set the caption justify method ("left", "right", "center", or "full")
+- ``highlight`` Set to True to enable automatic highlighting of cell contents.
+
+Border Styles
+~~~~~~~~~~~~~
You can set the border style by importing one of the preset :class:`~rich.box.Box` objects and setting the ``box`` argument in the table constructor. Here's an example that modifies the look of the Star Wars table::
@@ -49,9 +86,17 @@ You can set the border style by importing one of the preset :class:`~rich.box.Bo
See :ref:`appendix_box` for other box styles.
+You can also set ``box=None`` to remove borders entirely.
+
The :class:`~rich.table.Table` class offers a number of configuration options to set the look and feel of the table, including how borders are rendered and the style and alignment of the columns.
+Lines
+~~~~~
+
+By default, Tables will show a line under the header only. If you want to show lines between all rows add ``show_lines=True`` to the constructor.
+
+
Empty Tables
~~~~~~~~~~~~
@@ -80,10 +125,29 @@ This allows you to specify the text of the column only. If you want to set other
title="Star Wars Movies"
)
-Lines
-~~~~~
+Column Options
+~~~~~~~~~~~~~~
-By default, Tables will show a line under the header only. If you want to show lines between all rows add ``show_lines=True`` to the constructor.
+There are a number of options you can set on a column to modify how it will look.
+
+- ``header_style`` Sets the style of the header, e.g. "bold magenta".
+- ``footer_style`` Sets the style of the footer.
+- ``style`` Sets a style that applies to the column. You could use this to highlight a column by setting the background with "on green" for example.
+- ``justify`` Sets the text justify to one of "left", "center", "right", or "full".
+- ``vertical`` Sets the vertical alignment of the cells in a column, to one of "top", "middle", or "bottom".
+- ``width`` Explicitly set the width of a row to a given number of characters (disables automatic calculation).
+- ``min_width`` When set to an integer will prevent the column from shrinking below this amount.
+- ``max_width`` When set to an integer will prevent the column from growing beyond this amount.
+- ``ratio`` Defines a ratio to set the column width. For instance, if there are 3 columns with a total of 6 ratio, and ``ratio=2`` then the column will be a third of the available size.
+- ``no_wrap`` Set to False to prevent this column from wrapping.
+
+Vertical Alignment
+~~~~~~~~~~~~~~~~~~
+
+You can define the vertical alignment of a column by setting the ``vertical`` parameter of the column. You can also do this per-cell by wrapping your text or renderable with a :class:`~rich.align.Align` class::
+
+
+ table.add_row(Align("Title", vertical="middle"))
Grids
~~~~~
diff --git a/rich/__main__.py b/rich/__main__.py
index 5d1abd10d5..132e809412 100644
--- a/rich/__main__.py
+++ b/rich/__main__.py
@@ -4,13 +4,7 @@
from rich import box
from rich.color import Color
-from rich.console import (
- Console,
- ConsoleOptions,
- Group,
- RenderResult,
- RenderableType,
-)
+from rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult
from rich.markdown import Markdown
from rich.measure import Measurement
from rich.pretty import Pretty
@@ -247,13 +241,10 @@ def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
sponsor_message = Table.grid(padding=1)
sponsor_message.add_column(style="green", justify="right")
sponsor_message.add_column(no_wrap=True)
+
sponsor_message.add_row(
- "Sponsor me",
- "[u blue link=https://github.com/sponsors/willmcgugan]https://github.com/sponsors/willmcgugan",
- )
- sponsor_message.add_row(
- "Buy me a :coffee:",
- "[u blue link=https://ko-fi.com/willmcgugan]https://ko-fi.com/willmcgugan",
+ "Buy devs a :coffee:",
+ "[u blue link=https://ko-fi.com/textualize]https://ko-fi.com/textualize",
)
sponsor_message.add_row(
"Twitter",
@@ -265,9 +256,9 @@ def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
intro_message = Text.from_markup(
"""\
-It takes a lot of time to develop Rich and to provide support.
+We hope you enjoy using Rich!
-Consider supporting my work via Github Sponsors (ask your company / organization), or buy me a coffee to say thanks.
+Rich is maintained with :heart: by [link=https://www.textualize.io]Textualize.io[/]
- Will McGugan"""
)
diff --git a/rich/segment.py b/rich/segment.py
index c9bb4a494f..97679cefc0 100644
--- a/rich/segment.py
+++ b/rich/segment.py
@@ -12,6 +12,7 @@
Optional,
Sequence,
Tuple,
+ Type,
Union,
)
@@ -384,33 +385,116 @@ def set_shape(
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
- style (Style, optional): Style of any padding added. Defaults to None.
+ style (Style, optional): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
- List[List[Segment]]: New list of lines that fits width x height.
+ List[List[Segment]]: New list of lines.
"""
- if height is None:
- height = len(lines)
- shaped_lines: List[List[Segment]] = []
- pad_line = (
- [Segment(" " * width, style), Segment("\n")]
- if new_lines
- else [Segment(" " * width, style)]
+ _height = height or len(lines)
+
+ blank = (
+ [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
)
- append = shaped_lines.append
adjust_line_length = cls.adjust_line_length
- line: Optional[List[Segment]]
- iter_lines = iter(lines)
- for _ in range(height):
- line = next(iter_lines, None)
- if line is None:
- append(pad_line)
- else:
- append(adjust_line_length(line, width, style=style))
+ shaped_lines = lines[:_height]
+ shaped_lines[:] = [
+ adjust_line_length(line, width, style=style) for line in lines
+ ]
+ if len(shaped_lines) < _height:
+ shaped_lines.extend([blank] * (_height - len(shaped_lines)))
return shaped_lines
+ @classmethod
+ def align_top(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns lines to top (adds extra lines to bottom as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ lines = lines + [[blank]] * extra_lines
+ return lines
+
+ @classmethod
+ def align_bottom(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns render to bottom (adds extra lines above as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style): Style of any padding added. Defaults to None.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ lines = [[blank]] * extra_lines + lines
+ return lines
+
+ @classmethod
+ def align_middle(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns lines to middle (adds extra lines to above and below as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ top_lines = extra_lines // 2
+ bottom_lines = extra_lines - top_lines
+ lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
+ return lines
+
@classmethod
def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Simplify an iterable of segments by combining contiguous segments with the same style.
diff --git a/rich/table.py b/rich/table.py
index 24d7afbd26..0271d5fc66 100644
--- a/rich/table.py
+++ b/rich/table.py
@@ -1,7 +1,7 @@
from dataclasses import dataclass, field, replace
from typing import (
- Dict,
TYPE_CHECKING,
+ Dict,
Iterable,
List,
NamedTuple,
@@ -15,6 +15,7 @@
from ._loop import loop_first_last, loop_last
from ._pick import pick_bool
from ._ratio import ratio_distribute, ratio_reduce
+from .align import VerticalAlignMethod
from .jupyter import JupyterMixin
from .measure import Measurement
from .padding import Padding, PaddingDimensions
@@ -56,6 +57,9 @@ class Column:
justify: "JustifyMethod" = "left"
"""str: How to justify text within the column ("left", "center", "right", or "full")"""
+ vertical: "VerticalAlignMethod" = "top"
+ """str: How to vertically align content ("top", "middle", or "bottom")"""
+
overflow: "OverflowMethod" = "ellipsis"
"""str: Overflow method."""
@@ -112,6 +116,8 @@ class _Cell(NamedTuple):
"""Style to apply to cell."""
renderable: "RenderableType"
"""Cell renderable."""
+ vertical: VerticalAlignMethod
+ """Cell vertical alignment."""
class Table(JupyterMixin):
@@ -335,6 +341,7 @@ def add_column(
footer_style: Optional[StyleType] = None,
style: Optional[StyleType] = None,
justify: "JustifyMethod" = "left",
+ vertical: "VerticalAlignMethod" = "top",
overflow: "OverflowMethod" = "ellipsis",
width: Optional[int] = None,
min_width: Optional[int] = None,
@@ -353,6 +360,7 @@ def add_column(
footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
+ vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top".
overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
@@ -369,6 +377,7 @@ def add_column(
footer_style=footer_style or "",
style=style or "",
justify=justify,
+ vertical=vertical,
overflow=overflow,
width=width,
min_width=min_width,
@@ -636,10 +645,18 @@ def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
if any_padding:
_Padding = Padding
for first, last, (style, renderable) in loop_first_last(raw_cells):
- yield _Cell(style, _Padding(renderable, get_padding(first, last)))
+ yield _Cell(
+ style,
+ _Padding(renderable, get_padding(first, last)),
+ getattr(renderable, "vertical", None) or column.vertical,
+ )
else:
for (style, renderable) in raw_cells:
- yield _Cell(style, renderable)
+ yield _Cell(
+ style,
+ renderable,
+ getattr(renderable, "vertical", None) or column.vertical,
+ )
def _get_padding_width(self, column_index: int) -> int:
"""Get extra width from padding."""
@@ -770,18 +787,45 @@ def _render(
overflow=column.overflow,
height=None,
)
- cell_style = table_style + row_style + get_style(cell.style)
lines = console.render_lines(
- cell.renderable, render_options, style=cell_style
+ cell.renderable,
+ render_options,
+ style=get_style(cell.style) + row_style,
)
max_height = max(max_height, len(lines))
cells.append(lines)
+ row_height = max(len(cell) for cell in cells)
+
+ def align_cell(
+ cell: List[List[Segment]],
+ vertical: "VerticalAlignMethod",
+ width: int,
+ style: Style,
+ ) -> List[List[Segment]]:
+ if header_row:
+ vertical = "bottom"
+ elif footer_row:
+ vertical = "top"
+
+ if vertical == "top":
+ return _Segment.align_top(cell, width, row_height, style)
+ elif vertical == "middle":
+ return _Segment.align_middle(cell, width, row_height, style)
+ return _Segment.align_bottom(cell, width, row_height, style)
+
cells[:] = [
_Segment.set_shape(
- _cell, width, max_height, style=table_style + row_style
+ align_cell(
+ cell,
+ _cell.vertical,
+ width,
+ get_style(_cell.style) + row_style,
+ ),
+ width,
+ max_height,
)
- for width, _cell in zip(widths, cells)
+ for width, _cell, cell, column in zip(widths, row_cell, cells, columns)
]
if _box:
| diff --git a/tests/_card_render.py b/tests/_card_render.py
index 232191da0f..ac8d7d42df 100644
--- a/tests/_card_render.py
+++ b/tests/_card_render.py
@@ -1,1 +1,1 @@
-expected = """\x1b[3m Rich features \x1b[0m\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Colors \x1b[0m\x1b[1;31m \x1b[0m✓ \x1b[1;32m4-bit color\x1b[0m \x1b[38;2;86;0;0;48;2;51;0;0m▄\x1b[0m\x1b[38;2;86;9;0;48;2;51;5;0m▄\x1b[0m\x1b[38;2;86;18;0;48;2;51;11;0m▄\x1b[0m\x1b[38;2;86;28;0;48;2;51;16;0m▄\x1b[0m\x1b[38;2;86;37;0;48;2;51;22;0m▄\x1b[0m\x1b[38;2;86;47;0;48;2;51;27;0m▄\x1b[0m\x1b[38;2;86;56;0;48;2;51;33;0m▄\x1b[0m\x1b[38;2;86;66;0;48;2;51;38;0m▄\x1b[0m\x1b[38;2;86;75;0;48;2;51;44;0m▄\x1b[0m\x1b[38;2;86;85;0;48;2;51;50;0m▄\x1b[0m\x1b[38;2;78;86;0;48;2;46;51;0m▄\x1b[0m\x1b[38;2;69;86;0;48;2;40;51;0m▄\x1b[0m\x1b[38;2;59;86;0;48;2;35;51;0m▄\x1b[0m\x1b[38;2;50;86;0;48;2;29;51;0m▄\x1b[0m\x1b[38;2;40;86;0;48;2;24;51;0m▄\x1b[0m\x1b[38;2;31;86;0;48;2;18;51;0m▄\x1b[0m\x1b[38;2;22;86;0;48;2;12;51;0m▄\x1b[0m\x1b[38;2;12;86;0;48;2;7;51;0m▄\x1b[0m\x1b[38;2;3;86;0;48;2;1;51;0m▄\x1b[0m\x1b[38;2;0;86;6;48;2;0;51;3m▄\x1b[0m\x1b[38;2;0;86;15;48;2;0;51;9m▄\x1b[0m\x1b[38;2;0;86;25;48;2;0;51;14m▄\x1b[0m\x1b[38;2;0;86;34;48;2;0;51;20m▄\x1b[0m\x1b[38;2;0;86;44;48;2;0;51;25m▄\x1b[0m\x1b[38;2;0;86;53;48;2;0;51;31m▄\x1b[0m\x1b[38;2;0;86;63;48;2;0;51;37m▄\x1b[0m\x1b[38;2;0;86;72;48;2;0;51;42m▄\x1b[0m\x1b[38;2;0;86;81;48;2;0;51;48m▄\x1b[0m\x1b[38;2;0;81;86;48;2;0;48;51m▄\x1b[0m\x1b[38;2;0;72;86;48;2;0;42;51m▄\x1b[0m\x1b[38;2;0;63;86;48;2;0;37;51m▄\x1b[0m\x1b[38;2;0;53;86;48;2;0;31;51m▄\x1b[0m\x1b[38;2;0;44;86;48;2;0;25;51m▄\x1b[0m\x1b[38;2;0;34;86;48;2;0;20;51m▄\x1b[0m\x1b[38;2;0;25;86;48;2;0;14;51m▄\x1b[0m\x1b[38;2;0;15;86;48;2;0;9;51m▄\x1b[0m\x1b[38;2;0;6;86;48;2;0;3;51m▄\x1b[0m\x1b[38;2;3;0;86;48;2;1;0;51m▄\x1b[0m\x1b[38;2;12;0;86;48;2;7;0;51m▄\x1b[0m\x1b[38;2;22;0;86;48;2;12;0;51m▄\x1b[0m\x1b[38;2;31;0;86;48;2;18;0;51m▄\x1b[0m\x1b[38;2;40;0;86;48;2;24;0;51m▄\x1b[0m\x1b[38;2;50;0;86;48;2;29;0;51m▄\x1b[0m\x1b[38;2;59;0;86;48;2;35;0;51m▄\x1b[0m\x1b[38;2;69;0;86;48;2;40;0;51m▄\x1b[0m\x1b[38;2;78;0;86;48;2;46;0;51m▄\x1b[0m\x1b[38;2;86;0;85;48;2;51;0;50m▄\x1b[0m\x1b[38;2;86;0;75;48;2;5
1;0;44m▄\x1b[0m\x1b[38;2;86;0;66;48;2;51;0;38m▄\x1b[0m\x1b[38;2;86;0;56;48;2;51;0;33m▄\x1b[0m\x1b[38;2;86;0;47;48;2;51;0;27m▄\x1b[0m\x1b[38;2;86;0;37;48;2;51;0;22m▄\x1b[0m\x1b[38;2;86;0;28;48;2;51;0;16m▄\x1b[0m\x1b[38;2;86;0;18;48;2;51;0;11m▄\x1b[0m\x1b[38;2;86;0;9;48;2;51;0;5m▄\x1b[0m \n ✓ \x1b[1;34m8-bit color\x1b[0m \x1b[38;2;158;0;0;48;2;122;0;0m▄\x1b[0m\x1b[38;2;158;17;0;48;2;122;13;0m▄\x1b[0m\x1b[38;2;158;34;0;48;2;122;26;0m▄\x1b[0m\x1b[38;2;158;51;0;48;2;122;40;0m▄\x1b[0m\x1b[38;2;158;68;0;48;2;122;53;0m▄\x1b[0m\x1b[38;2;158;86;0;48;2;122;66;0m▄\x1b[0m\x1b[38;2;158;103;0;48;2;122;80;0m▄\x1b[0m\x1b[38;2;158;120;0;48;2;122;93;0m▄\x1b[0m\x1b[38;2;158;137;0;48;2;122;106;0m▄\x1b[0m\x1b[38;2;158;155;0;48;2;122;120;0m▄\x1b[0m\x1b[38;2;143;158;0;48;2;111;122;0m▄\x1b[0m\x1b[38;2;126;158;0;48;2;97;122;0m▄\x1b[0m\x1b[38;2;109;158;0;48;2;84;122;0m▄\x1b[0m\x1b[38;2;91;158;0;48;2;71;122;0m▄\x1b[0m\x1b[38;2;74;158;0;48;2;57;122;0m▄\x1b[0m\x1b[38;2;57;158;0;48;2;44;122;0m▄\x1b[0m\x1b[38;2;40;158;0;48;2;31;122;0m▄\x1b[0m\x1b[38;2;22;158;0;48;2;17;122;0m▄\x1b[0m\x1b[38;2;5;158;0;48;2;4;122;0m▄\x1b[0m\x1b[38;2;0;158;11;48;2;0;122;8m▄\x1b[0m\x1b[38;2;0;158;28;48;2;0;122;22m▄\x1b[0m\x1b[38;2;0;158;45;48;2;0;122;35m▄\x1b[0m\x1b[38;2;0;158;63;48;2;0;122;48m▄\x1b[0m\x1b[38;2;0;158;80;48;2;0;122;62m▄\x1b[0m\x1b[38;2;0;158;97;48;2;0;122;75m▄\x1b[0m\x1b[38;2;0;158;114;48;2;0;122;89m▄\x1b[0m\x1b[38;2;0;158;132;48;2;0;122;102m▄\x1b[0m\x1b[38;2;0;158;149;48;2;0;122;115m▄\x1b[0m\x1b[38;2;0;149;158;48;2;0;115;122m▄\x1b[0m\x1b[38;2;0;132;158;48;2;0;102;122m▄\x1b[0m\x1b[38;2;0;114;158;48;2;0;89;122m▄\x1b[0m\x1b[38;2;0;97;158;48;2;0;75;122m▄\x1b[0m\x1b[38;2;0;80;158;48;2;0;62;122m▄\x1b[0m\x1b[38;2;0;63;158;48;2;0;48;122m▄\x1b[0m\x1b[38;2;0;45;158;48;2;0;35;122m▄\x1b[0m\x1b[38;2;0;28;158;48;2;0;22;122m▄\x1b[0m\x1b[38;2;0;11;158;48;2;0;8;122m▄\x1b[0m\x1b[38;2;5;0;158;48;2;4;0;122m▄\x1b[0m\x1b[38;2;22;0;158;48;2;17;0;122m▄\x1b[0m\x1b[38;2;40;0;158;48;2;31;0;122m▄\x1b[0m\x1b[38;2;57;0;158;48;2;44;
0;122m▄\x1b[0m\x1b[38;2;74;0;158;48;2;57;0;122m▄\x1b[0m\x1b[38;2;91;0;158;48;2;71;0;122m▄\x1b[0m\x1b[38;2;109;0;158;48;2;84;0;122m▄\x1b[0m\x1b[38;2;126;0;158;48;2;97;0;122m▄\x1b[0m\x1b[38;2;143;0;158;48;2;111;0;122m▄\x1b[0m\x1b[38;2;158;0;155;48;2;122;0;120m▄\x1b[0m\x1b[38;2;158;0;137;48;2;122;0;106m▄\x1b[0m\x1b[38;2;158;0;120;48;2;122;0;93m▄\x1b[0m\x1b[38;2;158;0;103;48;2;122;0;80m▄\x1b[0m\x1b[38;2;158;0;86;48;2;122;0;66m▄\x1b[0m\x1b[38;2;158;0;68;48;2;122;0;53m▄\x1b[0m\x1b[38;2;158;0;51;48;2;122;0;40m▄\x1b[0m\x1b[38;2;158;0;34;48;2;122;0;26m▄\x1b[0m\x1b[38;2;158;0;17;48;2;122;0;13m▄\x1b[0m \n ✓ \x1b[1;35mTruecolor (16.7 million)\x1b[0m \x1b[38;2;229;0;0;48;2;193;0;0m▄\x1b[0m\x1b[38;2;229;25;0;48;2;193;21;0m▄\x1b[0m\x1b[38;2;229;50;0;48;2;193;42;0m▄\x1b[0m\x1b[38;2;229;75;0;48;2;193;63;0m▄\x1b[0m\x1b[38;2;229;100;0;48;2;193;84;0m▄\x1b[0m\x1b[38;2;229;125;0;48;2;193;105;0m▄\x1b[0m\x1b[38;2;229;150;0;48;2;193;126;0m▄\x1b[0m\x1b[38;2;229;175;0;48;2;193;147;0m▄\x1b[0m\x1b[38;2;229;200;0;48;2;193;169;0m▄\x1b[0m\x1b[38;2;229;225;0;48;2;193;190;0m▄\x1b[0m\x1b[38;2;208;229;0;48;2;176;193;0m▄\x1b[0m\x1b[38;2;183;229;0;48;2;155;193;0m▄\x1b[0m\x1b[38;2;158;229;0;48;2;133;193;0m▄\x1b[0m\x1b[38;2;133;229;0;48;2;112;193;0m▄\x1b[0m\x1b[38;2;108;229;0;48;2;91;193;0m▄\x1b[0m\x1b[38;2;83;229;0;48;2;70;193;0m▄\x1b[0m\x1b[38;2;58;229;0;48;2;49;193;0m▄\x1b[0m\x1b[38;2;33;229;0;48;2;28;193;0m▄\x1b[0m\x1b[38;2;8;229;0;48;2;7;193;0m▄\x1b[0m\x1b[38;2;0;229;16;48;2;0;193;14m▄\x1b[0m\x1b[38;2;0;229;41;48;2;0;193;35m▄\x1b[0m\x1b[38;2;0;229;66;48;2;0;193;56m▄\x1b[0m\x1b[38;2;0;229;91;48;2;0;193;77m▄\x1b[0m\x1b[38;2;0;229;116;48;2;0;193;98m▄\x1b[0m\x1b[38;2;0;229;141;48;2;0;193;119m▄\x1b[0m\x1b[38;2;0;229;166;48;2;0;193;140m▄\x1b[0m\x1b[38;2;0;229;191;48;2;0;193;162m▄\x1b[0m\x1b[38;2;0;229;216;48;2;0;193;183m▄\x1b[0m\x1b[38;2;0;216;229;48;2;0;183;193m▄\x1b[0m\x1b[38;2;0;191;229;48;2;0;162;193m▄\x1b[0m\x1b[38;2;0;166;229;48;2;0;140;193m▄\x1b[0m\x1b[38;2;0;141;229;48;2;0;119;193m▄\x1b[0m\x1b[38;2
;0;116;229;48;2;0;98;193m▄\x1b[0m\x1b[38;2;0;91;229;48;2;0;77;193m▄\x1b[0m\x1b[38;2;0;66;229;48;2;0;56;193m▄\x1b[0m\x1b[38;2;0;41;229;48;2;0;35;193m▄\x1b[0m\x1b[38;2;0;16;229;48;2;0;14;193m▄\x1b[0m\x1b[38;2;8;0;229;48;2;7;0;193m▄\x1b[0m\x1b[38;2;33;0;229;48;2;28;0;193m▄\x1b[0m\x1b[38;2;58;0;229;48;2;49;0;193m▄\x1b[0m\x1b[38;2;83;0;229;48;2;70;0;193m▄\x1b[0m\x1b[38;2;108;0;229;48;2;91;0;193m▄\x1b[0m\x1b[38;2;133;0;229;48;2;112;0;193m▄\x1b[0m\x1b[38;2;158;0;229;48;2;133;0;193m▄\x1b[0m\x1b[38;2;183;0;229;48;2;155;0;193m▄\x1b[0m\x1b[38;2;208;0;229;48;2;176;0;193m▄\x1b[0m\x1b[38;2;229;0;225;48;2;193;0;190m▄\x1b[0m\x1b[38;2;229;0;200;48;2;193;0;169m▄\x1b[0m\x1b[38;2;229;0;175;48;2;193;0;147m▄\x1b[0m\x1b[38;2;229;0;150;48;2;193;0;126m▄\x1b[0m\x1b[38;2;229;0;125;48;2;193;0;105m▄\x1b[0m\x1b[38;2;229;0;100;48;2;193;0;84m▄\x1b[0m\x1b[38;2;229;0;75;48;2;193;0;63m▄\x1b[0m\x1b[38;2;229;0;50;48;2;193;0;42m▄\x1b[0m\x1b[38;2;229;0;25;48;2;193;0;21m▄\x1b[0m \n ✓ \x1b[1;33mDumb terminals\x1b[0m \x1b[38;2;254;45;45;48;2;255;10;10m▄\x1b[0m\x1b[38;2;254;68;45;48;2;255;36;10m▄\x1b[0m\x1b[38;2;254;91;45;48;2;255;63;10m▄\x1b[0m\x1b[38;2;254;114;45;48;2;255;90;10m▄\x1b[0m\x1b[38;2;254;137;45;48;2;255;117;10m▄\x1b[0m\x1b[38;2;254;159;45;48;2;255;143;10m▄\x1b[0m\x1b[38;2;254;182;45;48;2;255;170;10m▄\x1b[0m\x1b[38;2;254;205;45;48;2;255;197;10m▄\x1b[0m\x1b[38;2;254;228;45;48;2;255;223;10m▄\x1b[0m\x1b[38;2;254;251;45;48;2;255;250;10m▄\x1b[0m\x1b[38;2;235;254;45;48;2;232;255;10m▄\x1b[0m\x1b[38;2;213;254;45;48;2;206;255;10m▄\x1b[0m\x1b[38;2;190;254;45;48;2;179;255;10m▄\x1b[0m\x1b[38;2;167;254;45;48;2;152;255;10m▄\x1b[0m\x1b[38;2;144;254;45;48;2;125;255;10m▄\x1b[0m\x1b[38;2;121;254;45;48;2;99;255;10m▄\x1b[0m\x1b[38;2;99;254;45;48;2;72;255;10m▄\x1b[0m\x1b[38;2;76;254;45;48;2;45;255;10m▄\x1b[0m\x1b[38;2;53;254;45;48;2;19;255;10m▄\x1b[0m\x1b[38;2;45;254;61;48;2;10;255;28m▄\x1b[0m\x1b[38;2;45;254;83;48;2;10;255;54m▄\x1b[0m\x1b[38;2;45;254;106;48;2;10;255;81m▄\x1b[0m\x1b[38;2;45;254;129;48;2;10;255;108m▄
\x1b[0m\x1b[38;2;45;254;152;48;2;10;255;134m▄\x1b[0m\x1b[38;2;45;254;175;48;2;10;255;161m▄\x1b[0m\x1b[38;2;45;254;197;48;2;10;255;188m▄\x1b[0m\x1b[38;2;45;254;220;48;2;10;255;214m▄\x1b[0m\x1b[38;2;45;254;243;48;2;10;255;241m▄\x1b[0m\x1b[38;2;45;243;254;48;2;10;241;255m▄\x1b[0m\x1b[38;2;45;220;254;48;2;10;214;255m▄\x1b[0m\x1b[38;2;45;197;254;48;2;10;188;255m▄\x1b[0m\x1b[38;2;45;175;254;48;2;10;161;255m▄\x1b[0m\x1b[38;2;45;152;254;48;2;10;134;255m▄\x1b[0m\x1b[38;2;45;129;254;48;2;10;108;255m▄\x1b[0m\x1b[38;2;45;106;254;48;2;10;81;255m▄\x1b[0m\x1b[38;2;45;83;254;48;2;10;54;255m▄\x1b[0m\x1b[38;2;45;61;254;48;2;10;28;255m▄\x1b[0m\x1b[38;2;53;45;254;48;2;19;10;255m▄\x1b[0m\x1b[38;2;76;45;254;48;2;45;10;255m▄\x1b[0m\x1b[38;2;99;45;254;48;2;72;10;255m▄\x1b[0m\x1b[38;2;121;45;254;48;2;99;10;255m▄\x1b[0m\x1b[38;2;144;45;254;48;2;125;10;255m▄\x1b[0m\x1b[38;2;167;45;254;48;2;152;10;255m▄\x1b[0m\x1b[38;2;190;45;254;48;2;179;10;255m▄\x1b[0m\x1b[38;2;213;45;254;48;2;206;10;255m▄\x1b[0m\x1b[38;2;235;45;254;48;2;232;10;255m▄\x1b[0m\x1b[38;2;254;45;251;48;2;255;10;250m▄\x1b[0m\x1b[38;2;254;45;228;48;2;255;10;223m▄\x1b[0m\x1b[38;2;254;45;205;48;2;255;10;197m▄\x1b[0m\x1b[38;2;254;45;182;48;2;255;10;170m▄\x1b[0m\x1b[38;2;254;45;159;48;2;255;10;143m▄\x1b[0m\x1b[38;2;254;45;137;48;2;255;10;117m▄\x1b[0m\x1b[38;2;254;45;114;48;2;255;10;90m▄\x1b[0m\x1b[38;2;254;45;91;48;2;255;10;63m▄\x1b[0m\x1b[38;2;254;45;68;48;2;255;10;36m▄\x1b[0m \n ✓ \x1b[1;36mAutomatic color conversion\x1b[0m 
\x1b[38;2;255;117;117;48;2;255;81;81m▄\x1b[0m\x1b[38;2;255;132;117;48;2;255;100;81m▄\x1b[0m\x1b[38;2;255;147;117;48;2;255;119;81m▄\x1b[0m\x1b[38;2;255;162;117;48;2;255;138;81m▄\x1b[0m\x1b[38;2;255;177;117;48;2;255;157;81m▄\x1b[0m\x1b[38;2;255;192;117;48;2;255;176;81m▄\x1b[0m\x1b[38;2;255;207;117;48;2;255;195;81m▄\x1b[0m\x1b[38;2;255;222;117;48;2;255;214;81m▄\x1b[0m\x1b[38;2;255;237;117;48;2;255;232;81m▄\x1b[0m\x1b[38;2;255;252;117;48;2;255;251;81m▄\x1b[0m\x1b[38;2;242;255;117;48;2;239;255;81m▄\x1b[0m\x1b[38;2;227;255;117;48;2;220;255;81m▄\x1b[0m\x1b[38;2;212;255;117;48;2;201;255;81m▄\x1b[0m\x1b[38;2;197;255;117;48;2;182;255;81m▄\x1b[0m\x1b[38;2;182;255;117;48;2;163;255;81m▄\x1b[0m\x1b[38;2;167;255;117;48;2;144;255;81m▄\x1b[0m\x1b[38;2;152;255;117;48;2;125;255;81m▄\x1b[0m\x1b[38;2;137;255;117;48;2;106;255;81m▄\x1b[0m\x1b[38;2;122;255;117;48;2;87;255;81m▄\x1b[0m\x1b[38;2;117;255;127;48;2;81;255;94m▄\x1b[0m\x1b[38;2;117;255;142;48;2;81;255;113m▄\x1b[0m\x1b[38;2;117;255;157;48;2;81;255;132m▄\x1b[0m\x1b[38;2;117;255;172;48;2;81;255;150m▄\x1b[0m\x1b[38;2;117;255;187;48;2;81;255;169m▄\x1b[0m\x1b[38;2;117;255;202;48;2;81;255;188m▄\x1b[0m\x1b[38;2;117;255;217;48;2;81;255;207m▄\x1b[0m\x1b[38;2;117;255;232;48;2;81;255;226m▄\x1b[0m\x1b[38;2;117;255;247;48;2;81;255;245m▄\x1b[0m\x1b[38;2;117;247;255;48;2;81;245;255m▄\x1b[0m\x1b[38;2;117;232;255;48;2;81;226;255m▄\x1b[0m\x1b[38;2;117;217;255;48;2;81;207;255m▄\x1b[0m\x1b[38;2;117;202;255;48;2;81;188;255m▄\x1b[0m\x1b[38;2;117;187;255;48;2;81;169;255m▄\x1b[0m\x1b[38;2;117;172;255;48;2;81;150;255m▄\x1b[0m\x1b[38;2;117;157;255;48;2;81;132;255m▄\x1b[0m\x1b[38;2;117;142;255;48;2;81;113;255m▄\x1b[0m\x1b[38;2;117;127;255;48;2;81;94;255m▄\x1b[0m\x1b[38;2;122;117;255;48;2;87;81;255m▄\x1b[0m\x1b[38;2;137;117;255;48;2;106;81;255m▄\x1b[0m\x1b[38;2;152;117;255;48;2;125;81;255m▄\x1b[0m\x1b[38;2;167;117;255;48;2;144;81;255m▄\x1b[0m\x1b[38;2;182;117;255;48;2;163;81;255m▄\x1b[0m\x1b[38;2;197;117;255;48;2;182;81;255m▄\x1b[0m\x1b[38;2;212;117;255;48;2;
201;81;255m▄\x1b[0m\x1b[38;2;227;117;255;48;2;220;81;255m▄\x1b[0m\x1b[38;2;242;117;255;48;2;239;81;255m▄\x1b[0m\x1b[38;2;255;117;252;48;2;255;81;251m▄\x1b[0m\x1b[38;2;255;117;237;48;2;255;81;232m▄\x1b[0m\x1b[38;2;255;117;222;48;2;255;81;214m▄\x1b[0m\x1b[38;2;255;117;207;48;2;255;81;195m▄\x1b[0m\x1b[38;2;255;117;192;48;2;255;81;176m▄\x1b[0m\x1b[38;2;255;117;177;48;2;255;81;157m▄\x1b[0m\x1b[38;2;255;117;162;48;2;255;81;138m▄\x1b[0m\x1b[38;2;255;117;147;48;2;255;81;119m▄\x1b[0m\x1b[38;2;255;117;132;48;2;255;81;100m▄\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Styles \x1b[0m\x1b[1;31m \x1b[0mAll ansi styles: \x1b[1mbold\x1b[0m, \x1b[2mdim\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[4munderline\x1b[0m, \x1b[9mstrikethrough\x1b[0m, \x1b[7mreverse\x1b[0m, and even \n \x1b[5mblink\x1b[0m. \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Text \x1b[0m\x1b[1;31m \x1b[0mWord wrap text. Justify \x1b[32mleft\x1b[0m, \x1b[33mcenter\x1b[0m, \x1b[34mright\x1b[0m or \x1b[31mfull\x1b[0m. \n \n \x1b[32mLorem ipsum dolor \x1b[0m \x1b[33m Lorem ipsum dolor \x1b[0m \x1b[34m Lorem ipsum dolor\x1b[0m \x1b[31mLorem\x1b[0m\x1b[31m \x1b[0m\x1b[31mipsum\x1b[0m\x1b[31m \x1b[0m\x1b[31mdolor\x1b[0m\x1b[31m \x1b[0m\x1b[31msit\x1b[0m \n \x1b[32msit amet, \x1b[0m \x1b[33m sit amet, \x1b[0m \x1b[34m sit amet,\x1b[0m \x1b[31mamet,\x1b[0m\x1b[31m \x1b[0m\x1b[31mconsectetur\x1b[0m \n \x1b[32mconsectetur \x1b[0m \x1b[33m consectetur \x1b[0m \x1b[34m consectetur\x1b[0m \x1b[31madipiscing\x1b[0m\x1b[31m \x1b[0m\x1b[31melit.\x1b[0m \n \x1b[32madipiscing elit. \x1b[0m \x1b[33m adipiscing elit. 
\x1b[0m \x1b[34m adipiscing elit.\x1b[0m \x1b[31mQuisque\x1b[0m\x1b[31m \x1b[0m\x1b[31min\x1b[0m\x1b[31m \x1b[0m\x1b[31mmetus\x1b[0m\x1b[31m \x1b[0m\x1b[31msed\x1b[0m \n \x1b[32mQuisque in metus sed\x1b[0m \x1b[33mQuisque in metus sed\x1b[0m \x1b[34mQuisque in metus sed\x1b[0m \x1b[31msapien\x1b[0m\x1b[31m \x1b[0m\x1b[31multricies\x1b[0m \n \x1b[32msapien ultricies \x1b[0m \x1b[33m sapien ultricies \x1b[0m \x1b[34m sapien ultricies\x1b[0m \x1b[31mpretium\x1b[0m\x1b[31m \x1b[0m\x1b[31ma\x1b[0m\x1b[31m \x1b[0m\x1b[31mat\x1b[0m\x1b[31m \x1b[0m\x1b[31mjusto.\x1b[0m \n \x1b[32mpretium a at justo. \x1b[0m \x1b[33mpretium a at justo. \x1b[0m \x1b[34m pretium a at justo.\x1b[0m \x1b[31mMaecenas\x1b[0m\x1b[31m \x1b[0m\x1b[31mluctus\x1b[0m\x1b[31m \x1b[0m\x1b[31mvelit\x1b[0m \n \x1b[32mMaecenas luctus \x1b[0m \x1b[33m Maecenas luctus \x1b[0m \x1b[34m Maecenas luctus\x1b[0m \x1b[31met auctor maximus.\x1b[0m \n \x1b[32mvelit et auctor \x1b[0m \x1b[33m velit et auctor \x1b[0m \x1b[34m velit et auctor\x1b[0m \n \x1b[32mmaximus. \x1b[0m \x1b[33m maximus. \x1b[0m \x1b[34m maximus.\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Asian \x1b[0m\x1b[1;31m \x1b[0m🇨🇳 该库支持中文,日文和韩文文本! \n\x1b[1;31m \x1b[0m\x1b[1;31m language \x1b[0m\x1b[1;31m \x1b[0m🇯🇵 ライブラリは中国語、日本語、韓国語のテキストをサポートしています \n\x1b[1;31m \x1b[0m\x1b[1;31m support \x1b[0m\x1b[1;31m \x1b[0m🇰🇷 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Markup \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;35mRich\x1b[0m supports a simple \x1b[3mbbcode\x1b[0m-like \x1b[1mmarkup\x1b[0m for \x1b[33mcolor\x1b[0m, \x1b[4mstyle\x1b[0m, and emoji! 
👍 🍎 🐜 🐻 … \n 🚌 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Tables \x1b[0m\x1b[1;31m \x1b[0m\x1b[1m \x1b[0m\x1b[1;32mDate\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;34mTitle\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;36mProduction Budget\x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1m \x1b[0m\x1b[1;35mBox Office\x1b[0m\x1b[1m \x1b[0m \n ───────────────────────────────────────────────────────────────────────────────────── \n \x1b[32m \x1b[0m\x1b[32mDec 20, 2019\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars: The Rise of \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $275,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[35m $375,126,118\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mSkywalker \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 25, 2018\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[1;2;34mSolo\x1b[0m\x1b[2;34m: A Star Wars Story \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $275,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m $393,151,347\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[32m \x1b[0m\x1b[32mDec 15, 2017\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars Ep. VIII: The Last \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $262,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[1;35m$1,332,539,889\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mJedi \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 19, 1999\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;34mStar Wars Ep. 
\x1b[0m\x1b[1;2;34mI\x1b[0m\x1b[2;34m: \x1b[0m\x1b[2;3;34mThe phantom \x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $115,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m$1,027,044,677\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[2m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;3;34mMenace\x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \x1b[2m \x1b[0m \x1b[2m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Syntax \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 1 \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mdef\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34miter_last\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m-\x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m>\x1b[0m \x1b[1m{\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mhighlighting\x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\"""Iterate and generate a tuple w\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m\'foo\'\x1b[0m: \x1b[1m[\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m & \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m 
\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1;36m3.1427\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;31m pretty \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m(\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m printing \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_va\x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m\'Paul Atreides\'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m\'Vladimir Harkonnen\'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m\'Thufir Hawat\'\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 
\x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[1m]\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m\'atomic\'\x1b[0m: \x1b[1m(\x1b[0m\x1b[3;91mFalse\x1b[0m, \x1b[3;92mTrue\x1b[0m, \x1b[3;35mNone\x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m11 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[1m}\x1b[0m 
\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Markdown \x1b[0m\x1b[1;31m \x1b[0m\x1b[36m# Markdown\x1b[0m ╔═══════════════════════════════════════╗ \n ║ \x1b[1mMarkdown\x1b[0m ║ \n \x1b[36mSupports much of the *markdown* \x1b[0m ╚═══════════════════════════════════════╝ \n \x1b[36m__syntax__!\x1b[0m \n Supports much of the \x1b[3mmarkdown\x1b[0m \x1b[1msyntax\x1b[0m! \n \x1b[36m- Headers\x1b[0m \n \x1b[36m- Basic formatting: **bold**, *italic*, \x1b[0m \x1b[1;33m • \x1b[0mHeaders \n \x1b[36m`code`\x1b[0m \x1b[1;33m • \x1b[0mBasic formatting: \x1b[1mbold\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[97;40mcode\x1b[0m \n \x1b[36m- Block quotes\x1b[0m \x1b[1;33m • \x1b[0mBlock quotes \n \x1b[36m- Lists, and more...\x1b[0m \x1b[1;33m • \x1b[0mLists, and more... \n \x1b[36m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m +more! \x1b[0m\x1b[1;31m \x1b[0mProgress bars, columns, styled logging handler, tracebacks, etc... \n\x1b[1;31m \x1b[0m \n"""
+expected = "\x1b[3m Rich features \x1b[0m\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Colors \x1b[0m\x1b[1;31m \x1b[0m✓ \x1b[1;32m4-bit color\x1b[0m \x1b[38;2;86;0;0;48;2;51;0;0m▄\x1b[0m\x1b[38;2;86;9;0;48;2;51;5;0m▄\x1b[0m\x1b[38;2;86;18;0;48;2;51;11;0m▄\x1b[0m\x1b[38;2;86;28;0;48;2;51;16;0m▄\x1b[0m\x1b[38;2;86;37;0;48;2;51;22;0m▄\x1b[0m\x1b[38;2;86;47;0;48;2;51;27;0m▄\x1b[0m\x1b[38;2;86;56;0;48;2;51;33;0m▄\x1b[0m\x1b[38;2;86;66;0;48;2;51;38;0m▄\x1b[0m\x1b[38;2;86;75;0;48;2;51;44;0m▄\x1b[0m\x1b[38;2;86;85;0;48;2;51;50;0m▄\x1b[0m\x1b[38;2;78;86;0;48;2;46;51;0m▄\x1b[0m\x1b[38;2;69;86;0;48;2;40;51;0m▄\x1b[0m\x1b[38;2;59;86;0;48;2;35;51;0m▄\x1b[0m\x1b[38;2;50;86;0;48;2;29;51;0m▄\x1b[0m\x1b[38;2;40;86;0;48;2;24;51;0m▄\x1b[0m\x1b[38;2;31;86;0;48;2;18;51;0m▄\x1b[0m\x1b[38;2;22;86;0;48;2;12;51;0m▄\x1b[0m\x1b[38;2;12;86;0;48;2;7;51;0m▄\x1b[0m\x1b[38;2;3;86;0;48;2;1;51;0m▄\x1b[0m\x1b[38;2;0;86;6;48;2;0;51;3m▄\x1b[0m\x1b[38;2;0;86;15;48;2;0;51;9m▄\x1b[0m\x1b[38;2;0;86;25;48;2;0;51;14m▄\x1b[0m\x1b[38;2;0;86;34;48;2;0;51;20m▄\x1b[0m\x1b[38;2;0;86;44;48;2;0;51;25m▄\x1b[0m\x1b[38;2;0;86;53;48;2;0;51;31m▄\x1b[0m\x1b[38;2;0;86;63;48;2;0;51;37m▄\x1b[0m\x1b[38;2;0;86;72;48;2;0;51;42m▄\x1b[0m\x1b[38;2;0;86;81;48;2;0;51;48m▄\x1b[0m\x1b[38;2;0;81;86;48;2;0;48;51m▄\x1b[0m\x1b[38;2;0;72;86;48;2;0;42;51m▄\x1b[0m\x1b[38;2;0;63;86;48;2;0;37;51m▄\x1b[0m\x1b[38;2;0;53;86;48;2;0;31;51m▄\x1b[0m\x1b[38;2;0;44;86;48;2;0;25;51m▄\x1b[0m\x1b[38;2;0;34;86;48;2;0;20;51m▄\x1b[0m\x1b[38;2;0;25;86;48;2;0;14;51m▄\x1b[0m\x1b[38;2;0;15;86;48;2;0;9;51m▄\x1b[0m\x1b[38;2;0;6;86;48;2;0;3;51m▄\x1b[0m\x1b[38;2;3;0;86;48;2;1;0;51m▄\x1b[0m\x1b[38;2;12;0;86;48;2;7;0;51m▄\x1b[0m\x1b[38;2;22;0;86;48;2;12;0;51m▄\x1b[0m\x1b[38;2;31;0;86;48;2;18;0;51m▄\x1b[0m\x1b[38;2;40;0;86;48;2;24;0;51m▄\x1b[0m\x1b[38;2;50;0;86;48;2;29;0;51m▄\x1b[0m\x1b[38;2;59;0;86;48;2;35;0;51m▄\x1b[0m\x1b[38;2;69;0;86;48;2;40;0;51m▄\x1b[0m\x1b[38;2;78;0;86;48;2;46;0;51m▄\x1b[0m\x1b[38;2;86;0;85;48;2;51;0;50m▄\x1b[0m\x1b[38;2;86;0;75;48;2;51;
0;44m▄\x1b[0m\x1b[38;2;86;0;66;48;2;51;0;38m▄\x1b[0m\x1b[38;2;86;0;56;48;2;51;0;33m▄\x1b[0m\x1b[38;2;86;0;47;48;2;51;0;27m▄\x1b[0m\x1b[38;2;86;0;37;48;2;51;0;22m▄\x1b[0m\x1b[38;2;86;0;28;48;2;51;0;16m▄\x1b[0m\x1b[38;2;86;0;18;48;2;51;0;11m▄\x1b[0m\x1b[38;2;86;0;9;48;2;51;0;5m▄\x1b[0m \n\x1b[1;31m \x1b[0m✓ \x1b[1;34m8-bit color\x1b[0m \x1b[38;2;158;0;0;48;2;122;0;0m▄\x1b[0m\x1b[38;2;158;17;0;48;2;122;13;0m▄\x1b[0m\x1b[38;2;158;34;0;48;2;122;26;0m▄\x1b[0m\x1b[38;2;158;51;0;48;2;122;40;0m▄\x1b[0m\x1b[38;2;158;68;0;48;2;122;53;0m▄\x1b[0m\x1b[38;2;158;86;0;48;2;122;66;0m▄\x1b[0m\x1b[38;2;158;103;0;48;2;122;80;0m▄\x1b[0m\x1b[38;2;158;120;0;48;2;122;93;0m▄\x1b[0m\x1b[38;2;158;137;0;48;2;122;106;0m▄\x1b[0m\x1b[38;2;158;155;0;48;2;122;120;0m▄\x1b[0m\x1b[38;2;143;158;0;48;2;111;122;0m▄\x1b[0m\x1b[38;2;126;158;0;48;2;97;122;0m▄\x1b[0m\x1b[38;2;109;158;0;48;2;84;122;0m▄\x1b[0m\x1b[38;2;91;158;0;48;2;71;122;0m▄\x1b[0m\x1b[38;2;74;158;0;48;2;57;122;0m▄\x1b[0m\x1b[38;2;57;158;0;48;2;44;122;0m▄\x1b[0m\x1b[38;2;40;158;0;48;2;31;122;0m▄\x1b[0m\x1b[38;2;22;158;0;48;2;17;122;0m▄\x1b[0m\x1b[38;2;5;158;0;48;2;4;122;0m▄\x1b[0m\x1b[38;2;0;158;11;48;2;0;122;8m▄\x1b[0m\x1b[38;2;0;158;28;48;2;0;122;22m▄\x1b[0m\x1b[38;2;0;158;45;48;2;0;122;35m▄\x1b[0m\x1b[38;2;0;158;63;48;2;0;122;48m▄\x1b[0m\x1b[38;2;0;158;80;48;2;0;122;62m▄\x1b[0m\x1b[38;2;0;158;97;48;2;0;122;75m▄\x1b[0m\x1b[38;2;0;158;114;48;2;0;122;89m▄\x1b[0m\x1b[38;2;0;158;132;48;2;0;122;102m▄\x1b[0m\x1b[38;2;0;158;149;48;2;0;122;115m▄\x1b[0m\x1b[38;2;0;149;158;48;2;0;115;122m▄\x1b[0m\x1b[38;2;0;132;158;48;2;0;102;122m▄\x1b[0m\x1b[38;2;0;114;158;48;2;0;89;122m▄\x1b[0m\x1b[38;2;0;97;158;48;2;0;75;122m▄\x1b[0m\x1b[38;2;0;80;158;48;2;0;62;122m▄\x1b[0m\x1b[38;2;0;63;158;48;2;0;48;122m▄\x1b[0m\x1b[38;2;0;45;158;48;2;0;35;122m▄\x1b[0m\x1b[38;2;0;28;158;48;2;0;22;122m▄\x1b[0m\x1b[38;2;0;11;158;48;2;0;8;122m▄\x1b[0m\x1b[38;2;5;0;158;48;2;4;0;122m▄\x1b[0m\x1b[38;2;22;0;158;48;2;17;0;122m▄\x1b[0m\x1b[38;2;40;0;158;48;2;31;0;122m▄\x1b[0m\x1b[38;2;57
;0;158;48;2;44;0;122m▄\x1b[0m\x1b[38;2;74;0;158;48;2;57;0;122m▄\x1b[0m\x1b[38;2;91;0;158;48;2;71;0;122m▄\x1b[0m\x1b[38;2;109;0;158;48;2;84;0;122m▄\x1b[0m\x1b[38;2;126;0;158;48;2;97;0;122m▄\x1b[0m\x1b[38;2;143;0;158;48;2;111;0;122m▄\x1b[0m\x1b[38;2;158;0;155;48;2;122;0;120m▄\x1b[0m\x1b[38;2;158;0;137;48;2;122;0;106m▄\x1b[0m\x1b[38;2;158;0;120;48;2;122;0;93m▄\x1b[0m\x1b[38;2;158;0;103;48;2;122;0;80m▄\x1b[0m\x1b[38;2;158;0;86;48;2;122;0;66m▄\x1b[0m\x1b[38;2;158;0;68;48;2;122;0;53m▄\x1b[0m\x1b[38;2;158;0;51;48;2;122;0;40m▄\x1b[0m\x1b[38;2;158;0;34;48;2;122;0;26m▄\x1b[0m\x1b[38;2;158;0;17;48;2;122;0;13m▄\x1b[0m \n\x1b[1;31m \x1b[0m✓ \x1b[1;35mTruecolor (16.7 million)\x1b[0m \x1b[38;2;229;0;0;48;2;193;0;0m▄\x1b[0m\x1b[38;2;229;25;0;48;2;193;21;0m▄\x1b[0m\x1b[38;2;229;50;0;48;2;193;42;0m▄\x1b[0m\x1b[38;2;229;75;0;48;2;193;63;0m▄\x1b[0m\x1b[38;2;229;100;0;48;2;193;84;0m▄\x1b[0m\x1b[38;2;229;125;0;48;2;193;105;0m▄\x1b[0m\x1b[38;2;229;150;0;48;2;193;126;0m▄\x1b[0m\x1b[38;2;229;175;0;48;2;193;147;0m▄\x1b[0m\x1b[38;2;229;200;0;48;2;193;169;0m▄\x1b[0m\x1b[38;2;229;225;0;48;2;193;190;0m▄\x1b[0m\x1b[38;2;208;229;0;48;2;176;193;0m▄\x1b[0m\x1b[38;2;183;229;0;48;2;155;193;0m▄\x1b[0m\x1b[38;2;158;229;0;48;2;133;193;0m▄\x1b[0m\x1b[38;2;133;229;0;48;2;112;193;0m▄\x1b[0m\x1b[38;2;108;229;0;48;2;91;193;0m▄\x1b[0m\x1b[38;2;83;229;0;48;2;70;193;0m▄\x1b[0m\x1b[38;2;58;229;0;48;2;49;193;0m▄\x1b[0m\x1b[38;2;33;229;0;48;2;28;193;0m▄\x1b[0m\x1b[38;2;8;229;0;48;2;7;193;0m▄\x1b[0m\x1b[38;2;0;229;16;48;2;0;193;14m▄\x1b[0m\x1b[38;2;0;229;41;48;2;0;193;35m▄\x1b[0m\x1b[38;2;0;229;66;48;2;0;193;56m▄\x1b[0m\x1b[38;2;0;229;91;48;2;0;193;77m▄\x1b[0m\x1b[38;2;0;229;116;48;2;0;193;98m▄\x1b[0m\x1b[38;2;0;229;141;48;2;0;193;119m▄\x1b[0m\x1b[38;2;0;229;166;48;2;0;193;140m▄\x1b[0m\x1b[38;2;0;229;191;48;2;0;193;162m▄\x1b[0m\x1b[38;2;0;229;216;48;2;0;193;183m▄\x1b[0m\x1b[38;2;0;216;229;48;2;0;183;193m▄\x1b[0m\x1b[38;2;0;191;229;48;2;0;162;193m▄\x1b[0m\x1b[38;2;0;166;229;48;2;0;140;193m▄\x1b[0m\x1b[38;2;0;141;229;
48;2;0;119;193m▄\x1b[0m\x1b[38;2;0;116;229;48;2;0;98;193m▄\x1b[0m\x1b[38;2;0;91;229;48;2;0;77;193m▄\x1b[0m\x1b[38;2;0;66;229;48;2;0;56;193m▄\x1b[0m\x1b[38;2;0;41;229;48;2;0;35;193m▄\x1b[0m\x1b[38;2;0;16;229;48;2;0;14;193m▄\x1b[0m\x1b[38;2;8;0;229;48;2;7;0;193m▄\x1b[0m\x1b[38;2;33;0;229;48;2;28;0;193m▄\x1b[0m\x1b[38;2;58;0;229;48;2;49;0;193m▄\x1b[0m\x1b[38;2;83;0;229;48;2;70;0;193m▄\x1b[0m\x1b[38;2;108;0;229;48;2;91;0;193m▄\x1b[0m\x1b[38;2;133;0;229;48;2;112;0;193m▄\x1b[0m\x1b[38;2;158;0;229;48;2;133;0;193m▄\x1b[0m\x1b[38;2;183;0;229;48;2;155;0;193m▄\x1b[0m\x1b[38;2;208;0;229;48;2;176;0;193m▄\x1b[0m\x1b[38;2;229;0;225;48;2;193;0;190m▄\x1b[0m\x1b[38;2;229;0;200;48;2;193;0;169m▄\x1b[0m\x1b[38;2;229;0;175;48;2;193;0;147m▄\x1b[0m\x1b[38;2;229;0;150;48;2;193;0;126m▄\x1b[0m\x1b[38;2;229;0;125;48;2;193;0;105m▄\x1b[0m\x1b[38;2;229;0;100;48;2;193;0;84m▄\x1b[0m\x1b[38;2;229;0;75;48;2;193;0;63m▄\x1b[0m\x1b[38;2;229;0;50;48;2;193;0;42m▄\x1b[0m\x1b[38;2;229;0;25;48;2;193;0;21m▄\x1b[0m \n\x1b[1;31m \x1b[0m✓ \x1b[1;33mDumb terminals\x1b[0m 
\x1b[38;2;254;45;45;48;2;255;10;10m▄\x1b[0m\x1b[38;2;254;68;45;48;2;255;36;10m▄\x1b[0m\x1b[38;2;254;91;45;48;2;255;63;10m▄\x1b[0m\x1b[38;2;254;114;45;48;2;255;90;10m▄\x1b[0m\x1b[38;2;254;137;45;48;2;255;117;10m▄\x1b[0m\x1b[38;2;254;159;45;48;2;255;143;10m▄\x1b[0m\x1b[38;2;254;182;45;48;2;255;170;10m▄\x1b[0m\x1b[38;2;254;205;45;48;2;255;197;10m▄\x1b[0m\x1b[38;2;254;228;45;48;2;255;223;10m▄\x1b[0m\x1b[38;2;254;251;45;48;2;255;250;10m▄\x1b[0m\x1b[38;2;235;254;45;48;2;232;255;10m▄\x1b[0m\x1b[38;2;213;254;45;48;2;206;255;10m▄\x1b[0m\x1b[38;2;190;254;45;48;2;179;255;10m▄\x1b[0m\x1b[38;2;167;254;45;48;2;152;255;10m▄\x1b[0m\x1b[38;2;144;254;45;48;2;125;255;10m▄\x1b[0m\x1b[38;2;121;254;45;48;2;99;255;10m▄\x1b[0m\x1b[38;2;99;254;45;48;2;72;255;10m▄\x1b[0m\x1b[38;2;76;254;45;48;2;45;255;10m▄\x1b[0m\x1b[38;2;53;254;45;48;2;19;255;10m▄\x1b[0m\x1b[38;2;45;254;61;48;2;10;255;28m▄\x1b[0m\x1b[38;2;45;254;83;48;2;10;255;54m▄\x1b[0m\x1b[38;2;45;254;106;48;2;10;255;81m▄\x1b[0m\x1b[38;2;45;254;129;48;2;10;255;108m▄\x1b[0m\x1b[38;2;45;254;152;48;2;10;255;134m▄\x1b[0m\x1b[38;2;45;254;175;48;2;10;255;161m▄\x1b[0m\x1b[38;2;45;254;197;48;2;10;255;188m▄\x1b[0m\x1b[38;2;45;254;220;48;2;10;255;214m▄\x1b[0m\x1b[38;2;45;254;243;48;2;10;255;241m▄\x1b[0m\x1b[38;2;45;243;254;48;2;10;241;255m▄\x1b[0m\x1b[38;2;45;220;254;48;2;10;214;255m▄\x1b[0m\x1b[38;2;45;197;254;48;2;10;188;255m▄\x1b[0m\x1b[38;2;45;175;254;48;2;10;161;255m▄\x1b[0m\x1b[38;2;45;152;254;48;2;10;134;255m▄\x1b[0m\x1b[38;2;45;129;254;48;2;10;108;255m▄\x1b[0m\x1b[38;2;45;106;254;48;2;10;81;255m▄\x1b[0m\x1b[38;2;45;83;254;48;2;10;54;255m▄\x1b[0m\x1b[38;2;45;61;254;48;2;10;28;255m▄\x1b[0m\x1b[38;2;53;45;254;48;2;19;10;255m▄\x1b[0m\x1b[38;2;76;45;254;48;2;45;10;255m▄\x1b[0m\x1b[38;2;99;45;254;48;2;72;10;255m▄\x1b[0m\x1b[38;2;121;45;254;48;2;99;10;255m▄\x1b[0m\x1b[38;2;144;45;254;48;2;125;10;255m▄\x1b[0m\x1b[38;2;167;45;254;48;2;152;10;255m▄\x1b[0m\x1b[38;2;190;45;254;48;2;179;10;255m▄\x1b[0m\x1b[38;2;213;45;254;48;2;206;10;255m▄\x1b[0m\x1b[3
8;2;235;45;254;48;2;232;10;255m▄\x1b[0m\x1b[38;2;254;45;251;48;2;255;10;250m▄\x1b[0m\x1b[38;2;254;45;228;48;2;255;10;223m▄\x1b[0m\x1b[38;2;254;45;205;48;2;255;10;197m▄\x1b[0m\x1b[38;2;254;45;182;48;2;255;10;170m▄\x1b[0m\x1b[38;2;254;45;159;48;2;255;10;143m▄\x1b[0m\x1b[38;2;254;45;137;48;2;255;10;117m▄\x1b[0m\x1b[38;2;254;45;114;48;2;255;10;90m▄\x1b[0m\x1b[38;2;254;45;91;48;2;255;10;63m▄\x1b[0m\x1b[38;2;254;45;68;48;2;255;10;36m▄\x1b[0m \n\x1b[1;31m \x1b[0m✓ \x1b[1;36mAutomatic color conversion\x1b[0m \x1b[38;2;255;117;117;48;2;255;81;81m▄\x1b[0m\x1b[38;2;255;132;117;48;2;255;100;81m▄\x1b[0m\x1b[38;2;255;147;117;48;2;255;119;81m▄\x1b[0m\x1b[38;2;255;162;117;48;2;255;138;81m▄\x1b[0m\x1b[38;2;255;177;117;48;2;255;157;81m▄\x1b[0m\x1b[38;2;255;192;117;48;2;255;176;81m▄\x1b[0m\x1b[38;2;255;207;117;48;2;255;195;81m▄\x1b[0m\x1b[38;2;255;222;117;48;2;255;214;81m▄\x1b[0m\x1b[38;2;255;237;117;48;2;255;232;81m▄\x1b[0m\x1b[38;2;255;252;117;48;2;255;251;81m▄\x1b[0m\x1b[38;2;242;255;117;48;2;239;255;81m▄\x1b[0m\x1b[38;2;227;255;117;48;2;220;255;81m▄\x1b[0m\x1b[38;2;212;255;117;48;2;201;255;81m▄\x1b[0m\x1b[38;2;197;255;117;48;2;182;255;81m▄\x1b[0m\x1b[38;2;182;255;117;48;2;163;255;81m▄\x1b[0m\x1b[38;2;167;255;117;48;2;144;255;81m▄\x1b[0m\x1b[38;2;152;255;117;48;2;125;255;81m▄\x1b[0m\x1b[38;2;137;255;117;48;2;106;255;81m▄\x1b[0m\x1b[38;2;122;255;117;48;2;87;255;81m▄\x1b[0m\x1b[38;2;117;255;127;48;2;81;255;94m▄\x1b[0m\x1b[38;2;117;255;142;48;2;81;255;113m▄\x1b[0m\x1b[38;2;117;255;157;48;2;81;255;132m▄\x1b[0m\x1b[38;2;117;255;172;48;2;81;255;150m▄\x1b[0m\x1b[38;2;117;255;187;48;2;81;255;169m▄\x1b[0m\x1b[38;2;117;255;202;48;2;81;255;188m▄\x1b[0m\x1b[38;2;117;255;217;48;2;81;255;207m▄\x1b[0m\x1b[38;2;117;255;232;48;2;81;255;226m▄\x1b[0m\x1b[38;2;117;255;247;48;2;81;255;245m▄\x1b[0m\x1b[38;2;117;247;255;48;2;81;245;255m▄\x1b[0m\x1b[38;2;117;232;255;48;2;81;226;255m▄\x1b[0m\x1b[38;2;117;217;255;48;2;81;207;255m▄\x1b[0m\x1b[38;2;117;202;255;48;2;81;188;255m▄\x1b[0m\x1b[38;2;117;187;255;48;
2;81;169;255m▄\x1b[0m\x1b[38;2;117;172;255;48;2;81;150;255m▄\x1b[0m\x1b[38;2;117;157;255;48;2;81;132;255m▄\x1b[0m\x1b[38;2;117;142;255;48;2;81;113;255m▄\x1b[0m\x1b[38;2;117;127;255;48;2;81;94;255m▄\x1b[0m\x1b[38;2;122;117;255;48;2;87;81;255m▄\x1b[0m\x1b[38;2;137;117;255;48;2;106;81;255m▄\x1b[0m\x1b[38;2;152;117;255;48;2;125;81;255m▄\x1b[0m\x1b[38;2;167;117;255;48;2;144;81;255m▄\x1b[0m\x1b[38;2;182;117;255;48;2;163;81;255m▄\x1b[0m\x1b[38;2;197;117;255;48;2;182;81;255m▄\x1b[0m\x1b[38;2;212;117;255;48;2;201;81;255m▄\x1b[0m\x1b[38;2;227;117;255;48;2;220;81;255m▄\x1b[0m\x1b[38;2;242;117;255;48;2;239;81;255m▄\x1b[0m\x1b[38;2;255;117;252;48;2;255;81;251m▄\x1b[0m\x1b[38;2;255;117;237;48;2;255;81;232m▄\x1b[0m\x1b[38;2;255;117;222;48;2;255;81;214m▄\x1b[0m\x1b[38;2;255;117;207;48;2;255;81;195m▄\x1b[0m\x1b[38;2;255;117;192;48;2;255;81;176m▄\x1b[0m\x1b[38;2;255;117;177;48;2;255;81;157m▄\x1b[0m\x1b[38;2;255;117;162;48;2;255;81;138m▄\x1b[0m\x1b[38;2;255;117;147;48;2;255;81;119m▄\x1b[0m\x1b[38;2;255;117;132;48;2;255;81;100m▄\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Styles \x1b[0m\x1b[1;31m \x1b[0mAll ansi styles: \x1b[1mbold\x1b[0m, \x1b[2mdim\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[4munderline\x1b[0m, \x1b[9mstrikethrough\x1b[0m, \x1b[7mreverse\x1b[0m, and even \n\x1b[1;31m \x1b[0m\x1b[5mblink\x1b[0m. \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Text \x1b[0m\x1b[1;31m \x1b[0mWord wrap text. Justify \x1b[32mleft\x1b[0m, \x1b[33mcenter\x1b[0m, \x1b[34mright\x1b[0m or \x1b[31mfull\x1b[0m. 
\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mLorem ipsum dolor \x1b[0m \x1b[33m Lorem ipsum dolor \x1b[0m \x1b[34m Lorem ipsum dolor\x1b[0m \x1b[31mLorem\x1b[0m\x1b[31m \x1b[0m\x1b[31mipsum\x1b[0m\x1b[31m \x1b[0m\x1b[31mdolor\x1b[0m\x1b[31m \x1b[0m\x1b[31msit\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32msit amet, \x1b[0m \x1b[33m sit amet, \x1b[0m \x1b[34m sit amet,\x1b[0m \x1b[31mamet,\x1b[0m\x1b[31m \x1b[0m\x1b[31mconsectetur\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mconsectetur \x1b[0m \x1b[33m consectetur \x1b[0m \x1b[34m consectetur\x1b[0m \x1b[31madipiscing\x1b[0m\x1b[31m \x1b[0m\x1b[31melit.\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32madipiscing elit. \x1b[0m \x1b[33m adipiscing elit. \x1b[0m \x1b[34m adipiscing elit.\x1b[0m \x1b[31mQuisque\x1b[0m\x1b[31m \x1b[0m\x1b[31min\x1b[0m\x1b[31m \x1b[0m\x1b[31mmetus\x1b[0m\x1b[31m \x1b[0m\x1b[31msed\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mQuisque in metus sed\x1b[0m \x1b[33mQuisque in metus sed\x1b[0m \x1b[34mQuisque in metus sed\x1b[0m \x1b[31msapien\x1b[0m\x1b[31m \x1b[0m\x1b[31multricies\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32msapien ultricies \x1b[0m \x1b[33m sapien ultricies \x1b[0m \x1b[34m sapien ultricies\x1b[0m \x1b[31mpretium\x1b[0m\x1b[31m \x1b[0m\x1b[31ma\x1b[0m\x1b[31m \x1b[0m\x1b[31mat\x1b[0m\x1b[31m \x1b[0m\x1b[31mjusto.\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mpretium a at justo. \x1b[0m \x1b[33mpretium a at justo. \x1b[0m \x1b[34m pretium a at justo.\x1b[0m \x1b[31mMaecenas\x1b[0m\x1b[31m \x1b[0m\x1b[31mluctus\x1b[0m\x1b[31m \x1b[0m\x1b[31mvelit\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mMaecenas luctus \x1b[0m \x1b[33m Maecenas luctus \x1b[0m \x1b[34m Maecenas luctus\x1b[0m \x1b[31met auctor maximus.\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mvelit et auctor \x1b[0m \x1b[33m velit et auctor \x1b[0m \x1b[34m velit et auctor\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32mmaximus. \x1b[0m \x1b[33m maximus. \x1b[0m \x1b[34m maximus.\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Asian \x1b[0m\x1b[1;31m \x1b[0m🇨🇳 该库支持中文,日文和韩文文本! 
\n\x1b[1;31m \x1b[0m\x1b[1;31m language \x1b[0m\x1b[1;31m \x1b[0m🇯🇵 ライブラリは中国語、日本語、韓国語のテキストをサポートしています \n\x1b[1;31m \x1b[0m\x1b[1;31m support \x1b[0m\x1b[1;31m \x1b[0m🇰🇷 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Markup \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;35mRich\x1b[0m supports a simple \x1b[3mbbcode\x1b[0m-like \x1b[1mmarkup\x1b[0m for \x1b[33mcolor\x1b[0m, \x1b[4mstyle\x1b[0m, and emoji! 👍 🍎 🐜 🐻 … \n\x1b[1;31m \x1b[0m🚌 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Tables \x1b[0m\x1b[1;31m \x1b[0m\x1b[1m \x1b[0m\x1b[1;32mDate\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;34mTitle\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;36mProduction Budget\x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1m \x1b[0m\x1b[1;35mBox Office\x1b[0m\x1b[1m \x1b[0m \n\x1b[1;31m \x1b[0m───────────────────────────────────────────────────────────────────────────────────── \n\x1b[1;31m \x1b[0m\x1b[32m \x1b[0m\x1b[32mDec 20, 2019\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars: The Rise of \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $275,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[35m $375,126,118\x1b[0m\x1b[35m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mSkywalker \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m \x1b[35m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[2;32m \x1b[0m\x1b[2;32mMay 25, 2018\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[1;2;34mSolo\x1b[0m\x1b[2;34m: A Star Wars Story \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $275,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m $393,151,347\x1b[0m\x1b[2;35m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32m \x1b[0m\x1b[32mDec 15, 2017\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars Ep. 
VIII: The Last \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $262,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[1;35m$1,332,539,889\x1b[0m\x1b[35m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mJedi \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m \x1b[35m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[2;32m \x1b[0m\x1b[2;32mMay 19, 1999\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;34mStar Wars Ep. \x1b[0m\x1b[1;2;34mI\x1b[0m\x1b[2;34m: \x1b[0m\x1b[2;3;34mThe phantom \x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $115,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m$1,027,044,677\x1b[0m\x1b[2;35m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;3;34mMenace\x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Syntax \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 1 \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mdef\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34miter_last\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m-\x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m>\x1b[0m \x1b[1m{\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mhighlighting\x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\"\"\"Iterate and generate a tuple w\x1b[0m \x1b[2;32m│ 
\x1b[0m\x1b[32m'foo'\x1b[0m: \x1b[1m[\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m & \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1;36m3.1427\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;31m pretty \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m(\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m printing \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_va\x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Paul Atreides'\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m 
\x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Vladimir Harkonnen'\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Thufir Hawat'\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m)\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[1m]\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'atomic'\x1b[0m: \x1b[1m(\x1b[0m\x1b[3;91mFalse\x1b[0m, \x1b[3;92mTrue\x1b[0m, 
\x1b[3;35mNone\x1b[0m\x1b[1m)\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m11 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[1m}\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Markdown \x1b[0m\x1b[1;31m \x1b[0m\x1b[36m# Markdown\x1b[0m ╔═══════════════════════════════════════╗ \n\x1b[1;31m \x1b[0m ║ \x1b[1mMarkdown\x1b[0m ║ \n\x1b[1;31m \x1b[0m\x1b[36mSupports much of the *markdown* \x1b[0m ╚═══════════════════════════════════════╝ \n\x1b[1;31m \x1b[0m\x1b[36m__syntax__!\x1b[0m \n\x1b[1;31m \x1b[0m Supports much of the \x1b[3mmarkdown\x1b[0m \x1b[1msyntax\x1b[0m! \n\x1b[1;31m \x1b[0m\x1b[36m- Headers\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[36m- Basic formatting: **bold**, *italic*, \x1b[0m \x1b[1;33m • \x1b[0mHeaders \n\x1b[1;31m \x1b[0m\x1b[36m`code`\x1b[0m \x1b[1;33m • \x1b[0mBasic formatting: \x1b[1mbold\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[97;40mcode\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[36m- Block quotes\x1b[0m \x1b[1;33m • \x1b[0mBlock quotes \n\x1b[1;31m \x1b[0m\x1b[36m- Lists, and more...\x1b[0m \x1b[1;33m • \x1b[0mLists, and more... \n\x1b[1;31m \x1b[0m\x1b[36m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m +more! \x1b[0m\x1b[1;31m \x1b[0mProgress bars, columns, styled logging handler, tracebacks, etc... \n\x1b[1;31m \x1b[0m \n"
diff --git a/tests/test_log.py b/tests/test_log.py
index 94a1f9e17b..22bb8b7b83 100644
--- a/tests/test_log.py
+++ b/tests/test_log.py
@@ -6,7 +6,6 @@
from rich.console import Console
-
re_link_ids = re.compile(r"id=[\d\.\-]*?;.*?\x1b")
@@ -38,7 +37,7 @@ def render_log():
def test_log():
expected = replace_link_ids(
- "\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0m \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m33\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0mHello from \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m ! \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m34\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0m\x1b[1m[\x1b[0m\x1b[1;36m1\x1b[0m, \x1b[1;36m2\x1b[0m, \x1b[1;36m3\x1b[0m\x1b[1m]\x1b[0m \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m35\x1b[0m\x1b]8;;\x1b\\\n \x1b[34m╭─\x1b[0m\x1b[34m───────────────────── \x1b[0m\x1b[3;34mlocals\x1b[0m\x1b[34m ─────────────────────\x1b[0m\x1b[34m─╮\x1b[0m \n \x1b[34m│\x1b[0m \x1b[3;33mconsole\x1b[0m\x1b[31m =\x1b[0m \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m \x1b[34m│\x1b[0m \n \x1b[34m╰────────────────────────────────────────────────────╯\x1b[0m \n"
+ "\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0m \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m32\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0mHello from \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m ! \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m33\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[2;36m \x1b[0m\x1b[1m[\x1b[0m\x1b[1;36m1\x1b[0m, \x1b[1;36m2\x1b[0m, \x1b[1;36m3\x1b[0m\x1b[1m]\x1b[0m \x1b]8;id=0;foo\x1b\\\x1b[2msource.py\x1b[0m\x1b]8;;\x1b\\\x1b[2m:\x1b[0m\x1b]8;id=0;foo\x1b\\\x1b[2m34\x1b[0m\x1b]8;;\x1b\\\n\x1b[2;36m \x1b[0m\x1b[34m╭─\x1b[0m\x1b[34m───────────────────── \x1b[0m\x1b[3;34mlocals\x1b[0m\x1b[34m ─────────────────────\x1b[0m\x1b[34m─╮\x1b[0m \x1b[2m \x1b[0m\n\x1b[2;36m \x1b[0m\x1b[34m│\x1b[0m \x1b[3;33mconsole\x1b[0m\x1b[31m =\x1b[0m \x1b[1m<\x1b[0m\x1b[1;95mconsole\x1b[0m\x1b[39m \x1b[0m\x1b[33mwidth\x1b[0m\x1b[39m=\x1b[0m\x1b[1;36m80\x1b[0m\x1b[39m ColorSystem.TRUECOLOR\x1b[0m\x1b[1m>\x1b[0m \x1b[34m│\x1b[0m \x1b[2m \x1b[0m\n\x1b[2;36m \x1b[0m\x1b[34m╰────────────────────────────────────────────────────╯\x1b[0m \x1b[2m \x1b[0m\n"
)
rendered = render_log()
print(repr(rendered))
diff --git a/tests/test_segment.py b/tests/test_segment.py
index 9aff243a83..9adf103299 100644
--- a/tests/test_segment.py
+++ b/tests/test_segment.py
@@ -2,8 +2,7 @@
import pytest
-from rich.segment import ControlType
-from rich.segment import Segment, Segments, SegmentLines
+from rich.segment import ControlType, Segment, SegmentLines, Segments
from rich.style import Style
@@ -179,8 +178,8 @@ def test_divide_complex():
"[on orange4] \n"
" [on green]XX[on orange4] \n"
)
- from rich.text import Text
from rich.console import Console
+ from rich.text import Text
text = Text.from_markup(MAP)
console = Console(
@@ -299,3 +298,33 @@ def test_segment_lines_renderable():
Segment("foo"),
Segment("\n"),
]
+
+
+def test_align_top():
+ lines = [[Segment("X")]]
+ assert Segment.align_top(lines, 3, 1, Style()) == lines
+ assert Segment.align_top(lines, 3, 3, Style()) == [
+ [Segment("X")],
+ [Segment(" ", Style())],
+ [Segment(" ", Style())],
+ ]
+
+
+def test_align_middle():
+ lines = [[Segment("X")]]
+ assert Segment.align_middle(lines, 3, 1, Style()) == lines
+ assert Segment.align_middle(lines, 3, 3, Style()) == [
+ [Segment(" ", Style())],
+ [Segment("X")],
+ [Segment(" ", Style())],
+ ]
+
+
+def test_align_bottom():
+ lines = [[Segment("X")]]
+ assert Segment.align_bottom(lines, 3, 1, Style()) == lines
+ assert Segment.align_bottom(lines, 3, 3, Style()) == [
+ [Segment(" ", Style())],
+ [Segment(" ", Style())],
+ [Segment("X")],
+ ]
diff --git a/tests/test_table.py b/tests/test_table.py
index 29826e1182..6fc863f20c 100644
--- a/tests/test_table.py
+++ b/tests/test_table.py
@@ -4,11 +4,12 @@
import pytest
-from rich import errors
+from rich import box, errors
+from rich.align import VerticalAlignMethod
from rich.console import Console
from rich.measure import Measurement
from rich.style import Style
-from rich.table import Table, Column
+from rich.table import Column, Table
from rich.text import Text
@@ -158,6 +159,29 @@ def test_get_row_style():
assert table.get_row_style(console, 1) == Style.parse("on red")
+def test_vertical_align_top():
+ console = Console(_environ={})
+
+ def make_table(vertical_align):
+ table = Table(show_header=False, box=box.SQUARE)
+ table.add_column(vertical=vertical_align)
+ table.add_row("foo", "\n".join(["bar"] * 5))
+
+ return table
+
+ with console.capture() as capture:
+ console.print(make_table("top"))
+ console.print()
+ console.print(make_table("middle"))
+ console.print()
+ console.print(make_table("bottom"))
+ console.print()
+ result = capture.get()
+ print(repr(result))
+ expected = "┌─────┬─────┐\n│ foo │ bar │\n│ │ bar │\n│ │ bar │\n│ │ bar │\n│ │ bar │\n└─────┴─────┘\n\n┌─────┬─────┐\n│ │ bar │\n│ │ bar │\n│ foo │ bar │\n│ │ bar │\n│ │ bar │\n└─────┴─────┘\n\n┌─────┬─────┐\n│ │ bar │\n│ │ bar │\n│ │ bar │\n│ │ bar │\n│ foo │ bar │\n└─────┴─────┘\n\n"
+ assert result == expected
+
+
if __name__ == "__main__":
render = render_tables()
print(render)
| diff --git a/CHANGELOG.md b/CHANGELOG.md
index abe01b9d04..5350b9f386 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,13 +5,16 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
## [11.0.0] - Unreleased
### Added
-- Fixed issue with pretty repr in jupyter notebook https://github.com/Textualize/rich/issues/1717
- Added max_depth arg to pretty printing
+- Added `vertical_align` to Table.add_row
+
+### Fixed
+
+- Fixed issue with pretty repr in jupyter notebook https://github.com/Textualize/rich/issues/1717
- Fix Traceback theme defaults override user supplied styles https://github.com/Textualize/rich/issues/1786
## [10.16.2] - 2021-01-02
diff --git a/docs/source/tables.rst b/docs/source/tables.rst
index 2603e7912a..967a573825 100644
--- a/docs/source/tables.rst
+++ b/docs/source/tables.rst
@@ -40,7 +40,44 @@ This produces the following output:
</pre>
-Rich is quite smart about rendering the table. It will adjust the column widths to fit the contents and will wrap text if it doesn't fit. You can also add anything that Rich knows how to render as a title or row cell (even another table)!
+Rich will calculate the optimal column sizes to fit your content, and will wrap text to fit if the terminal is not wide enough to fit the contents.
+
+.. note::
+ You are not limited to adding text in the ``add_row`` method. You can add anything that Rich knows how to render (including another table).
+
+Table Options
+~~~~~~~~~~~~~
+
+There are a number of keyword arguments on the Table constructor you can use to define how a table should look.
+
+- ``title`` Sets the title of the table (text show above the table).
+- ``caption`` Sets the table caption (text show below the table).
+- ``width`` Sets the desired width of the table (disables automatic width calculation).
+- ``min_width`` Sets a minimum width for the table.
+- ``box`` Sets one of the :ref:`appendix_box` styles for the table grid, or ``None`` for no grid.
+- ``safe_box`` Set to ``True`` to force the table to generate ASCII characters rather than unicode.
+- ``padding`` A integer, or tuple of 1, 2, or 4 values to set the padding on cells.
+- ``collapse_padding`` If True the padding of neighboring cells will be merged.
+- ``pad_edge`` Set to False to remove padding around the edge of the table.
+- ``expand`` Set to True to expand the table to the full available size.
+- ``show_header`` Set to True to show a header, False to disable it.
+- ``show_footer`` Set to True to show a footer, False to disable it.
+- ``show edge`` Set to False to disable the edge line around the table.
+- ``show_lines`` Set to True to show lines between rows as well as header / footer.
+- ``leading`` Additional space between rows.
+- ``style`` A Style to apply to the entire table, e.g. "on blue"
+- ``row_styles`` Set to a list of styles to style alternating rows. e.g. ``["dim", ""]`` to create *zebra stripes*
+- ``header_style`` Set the default style for the header.
+- ``footer_style`` Set the default style for the footer.
+- ``border style`` Set a style for border characters.
+- ``title_style`` Set a style for the title.
+- ``caption_style`` Set a style for the caption.
+- ``title_justify`` Set the title justify method ("left", "right", "center", or "full")
+- ``caption_justify`` Set the caption justify method ("left", "right", "center", or "full")
+- ``highlight`` Set to True to enable automatic highlighting of cell contents.
+
+Border Styles
+~~~~~~~~~~~~~
You can set the border style by importing one of the preset :class:`~rich.box.Box` objects and setting the ``box`` argument in the table constructor. Here's an example that modifies the look of the Star Wars table::
@@ -49,9 +86,17 @@ You can set the border style by importing one of the preset :class:`~rich.box.Bo
See :ref:`appendix_box` for other box styles.
+You can also set ``box=None`` to remove borders entirely.
+
The :class:`~rich.table.Table` class offers a number of configuration options to set the look and feel of the table, including how borders are rendered and the style and alignment of the columns.
+Lines
+~~~~~
+
+By default, Tables will show a line under the header only. If you want to show lines between all rows add ``show_lines=True`` to the constructor.
+
+
Empty Tables
~~~~~~~~~~~~
@@ -80,10 +125,29 @@ This allows you to specify the text of the column only. If you want to set other
title="Star Wars Movies"
)
-Lines
-~~~~~
+Column Options
+~~~~~~~~~~~~~~
-By default, Tables will show a line under the header only. If you want to show lines between all rows add ``show_lines=True`` to the constructor.
+There are a number of options you can set on a column to modify how it will look.
+
+- ``header_style`` Sets the style of the header, e.g. "bold magenta".
+- ``footer_style`` Sets the style of the footer.
+- ``style`` Sets a style that applies to the column. You could use this to highlight a column by setting the background with "on green" for example.
+- ``justify`` Sets the text justify to one of "left", "center", "right", or "full".
+- ``vertical`` Sets the vertical alignment of the cells in a column, to one of "top", "middle", or "bottom".
+- ``width`` Explicitly set the width of a row to a given number of characters (disables automatic calculation).
+- ``min_width`` When set to an integer will prevent the column from shrinking below this amount.
+- ``max_width`` When set to an integer will prevent the column from growing beyond this amount.
+- ``ratio`` Defines a ratio to set the column width. For instance, if there are 3 columns with a total of 6 ratio, and ``ratio=2`` then the column will be a third of the available size.
+- ``no_wrap`` Set to False to prevent this column from wrapping.
+
+Vertical Alignment
+~~~~~~~~~~~~~~~~~~
+
+You can define the vertical alignment of a column by setting the ``vertical`` parameter of the column. You can also do this per-cell by wrapping your text or renderable with a :class:`~rich.align.Align` class::
+
+
+ table.add_row(Align("Title", vertical="middle"))
Grids
~~~~~
| [
{
"components": [
{
"doc": "Aligns lines to top (adds extra lines to bottom as required).\n\n Args:\n lines (List[List[Segment]]): A list of lines.\n width (int): Desired width.\n height (int, optional): Desired height or None for no change.\n sty... | [
"tests/test_log.py::test_log",
"tests/test_segment.py::test_align_top",
"tests/test_segment.py::test_align_middle",
"tests/test_segment.py::test_align_bottom",
"tests/test_table.py::test_vertical_align_top"
] | [
"tests/test_log.py::test_log_caller_frame_info",
"tests/test_log.py::test_justify",
"tests/test_segment.py::test_repr",
"tests/test_segment.py::test_line",
"tests/test_segment.py::test_apply_style",
"tests/test_segment.py::test_split_lines",
"tests/test_segment.py::test_split_and_crop_lines",
"tests/t... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Table vertical align
Add `vertical_align` parameter to `Table.add_column`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/segment.py]
(definition of Segment.align_top:)
def align_top( cls: Type["Segment"], lines: List[List["Segment"]], width: int, height: int, style: Style, new_lines: bool = False, ) -> List[List["Segment"]]:
"""Aligns lines to top (adds extra lines to bottom as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added.
new_lines (bool, optional): Padded lines should include "
". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
(definition of Segment.align_bottom:)
def align_bottom( cls: Type["Segment"], lines: List[List["Segment"]], width: int, height: int, style: Style, new_lines: bool = False, ) -> List[List["Segment"]]:
"""Aligns render to bottom (adds extra lines above as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added. Defaults to None.
new_lines (bool, optional): Padded lines should include "
". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
(definition of Segment.align_middle:)
def align_middle( cls: Type["Segment"], lines: List[List["Segment"]], width: int, height: int, style: Style, new_lines: bool = False, ) -> List[List["Segment"]]:
"""Aligns lines to middle (adds extra lines to above and below as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added.
new_lines (bool, optional): Padded lines should include "
". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
[end of new definitions in rich/segment.py]
[start of new definitions in rich/table.py]
(definition of Table._render.align_cell:)
def align_cell( cell: List[List[Segment]], vertical: "VerticalAlignMethod", width: int, style: Style, ) -> List[List[Segment]]:
[end of new definitions in rich/table.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | |
pydata__xarray-6135 | 6,135 | pydata/xarray | 0.20 | 48290fa14accd3ac87768d3f73d69493b82b0be6 | 2022-01-04T15:28:16Z | diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index a8cd952609c..6c71407cd46 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -19,9 +19,15 @@ What's New
v2022.02.0 (unreleased)
-----------------------
+
New Features
~~~~~~~~~~~~
+- Enabled multiplying tick offsets by floats. Allows ``float`` ``n`` in
+ :py:meth:`CFTimeIndex.shift` if ``shift_freq`` is between ``Day``
+ and ``Microsecond``. (:issue:`6134`, :pull:`6135`).
+ By `Aaron Spring <https://github.com/aaronspring>`_.
+
Breaking changes
~~~~~~~~~~~~~~~~
@@ -42,6 +48,7 @@ Documentation
~~~~~~~~~~~~~
+
Internal Changes
~~~~~~~~~~~~~~~~
@@ -82,6 +89,7 @@ New Features
- Enable the limit option for dask array in the following methods :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` (:issue:`6112`)
By `Joseph Nowak <https://github.com/josephnowak>`_.
+
Breaking changes
~~~~~~~~~~~~~~~~
- Rely on matplotlib's default datetime converters instead of pandas' (:issue:`6102`, :pull:`6109`).
diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py
index 30bfd882b5c..a4e2870650d 100644
--- a/xarray/coding/cftime_offsets.py
+++ b/xarray/coding/cftime_offsets.py
@@ -39,11 +39,12 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
import re
from datetime import datetime, timedelta
from functools import partial
-from typing import ClassVar, Optional
+from typing import ClassVar
import numpy as np
import pandas as pd
@@ -87,10 +88,10 @@ def get_date_type(calendar, use_cftime=True):
class BaseCFTimeOffset:
- _freq: ClassVar[Optional[str]] = None
- _day_option: ClassVar[Optional[str]] = None
+ _freq: ClassVar[str | None] = None
+ _day_option: ClassVar[str | None] = None
- def __init__(self, n=1):
+ def __init__(self, n: int = 1):
if not isinstance(n, int):
raise TypeError(
"The provided multiple 'n' must be an integer. "
@@ -122,6 +123,8 @@ def __sub__(self, other):
return NotImplemented
def __mul__(self, other):
+ if not isinstance(other, int):
+ return NotImplemented
return type(self)(n=other * self.n)
def __neg__(self):
@@ -171,6 +174,40 @@ def _get_offset_day(self, other):
return _get_day_of_month(other, self._day_option)
+class Tick(BaseCFTimeOffset):
+ # analogous https://github.com/pandas-dev/pandas/blob/ccb25ab1d24c4fb9691270706a59c8d319750870/pandas/_libs/tslibs/offsets.pyx#L806
+
+ def _next_higher_resolution(self):
+ self_type = type(self)
+ if self_type not in [Day, Hour, Minute, Second, Millisecond]:
+ raise ValueError("Could not convert to integer offset at any resolution")
+ if type(self) is Day:
+ return Hour(self.n * 24)
+ if type(self) is Hour:
+ return Minute(self.n * 60)
+ if type(self) is Minute:
+ return Second(self.n * 60)
+ if type(self) is Second:
+ return Millisecond(self.n * 1000)
+ if type(self) is Millisecond:
+ return Microsecond(self.n * 1000)
+
+ def __mul__(self, other):
+ if not isinstance(other, (int, float)):
+ return NotImplemented
+ if isinstance(other, float):
+ n = other * self.n
+ # If the new `n` is an integer, we can represent it using the
+ # same BaseCFTimeOffset subclass as self, otherwise we need to move up
+ # to a higher-resolution subclass
+ if np.isclose(n % 1, 0):
+ return type(self)(int(n))
+
+ new_self = self._next_higher_resolution()
+ return new_self * other
+ return type(self)(n=other * self.n)
+
+
def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
@@ -396,6 +433,8 @@ def __sub__(self, other):
return NotImplemented
def __mul__(self, other):
+ if isinstance(other, float):
+ return NotImplemented
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
@@ -482,6 +521,8 @@ def __sub__(self, other):
return NotImplemented
def __mul__(self, other):
+ if isinstance(other, float):
+ return NotImplemented
return type(self)(n=other * self.n, month=self.month)
def rule_code(self):
@@ -541,7 +582,7 @@ def rollback(self, date):
return date - YearEnd(month=self.month)
-class Day(BaseCFTimeOffset):
+class Day(Tick):
_freq = "D"
def as_timedelta(self):
@@ -551,7 +592,7 @@ def __apply__(self, other):
return other + self.as_timedelta()
-class Hour(BaseCFTimeOffset):
+class Hour(Tick):
_freq = "H"
def as_timedelta(self):
@@ -561,7 +602,7 @@ def __apply__(self, other):
return other + self.as_timedelta()
-class Minute(BaseCFTimeOffset):
+class Minute(Tick):
_freq = "T"
def as_timedelta(self):
@@ -571,7 +612,7 @@ def __apply__(self, other):
return other + self.as_timedelta()
-class Second(BaseCFTimeOffset):
+class Second(Tick):
_freq = "S"
def as_timedelta(self):
@@ -581,7 +622,7 @@ def __apply__(self, other):
return other + self.as_timedelta()
-class Millisecond(BaseCFTimeOffset):
+class Millisecond(Tick):
_freq = "L"
def as_timedelta(self):
@@ -591,7 +632,7 @@ def __apply__(self, other):
return other + self.as_timedelta()
-class Microsecond(BaseCFTimeOffset):
+class Microsecond(Tick):
_freq = "U"
def as_timedelta(self):
diff --git a/xarray/coding/cftimeindex.py b/xarray/coding/cftimeindex.py
index ac6904d4e31..20d5206b797 100644
--- a/xarray/coding/cftimeindex.py
+++ b/xarray/coding/cftimeindex.py
@@ -38,11 +38,11 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import annotations
import re
import warnings
from datetime import timedelta
-from typing import Tuple, Type
import numpy as np
import pandas as pd
@@ -66,7 +66,7 @@
REPR_ELLIPSIS_SHOW_ITEMS_FRONT_END = 10
-OUT_OF_BOUNDS_TIMEDELTA_ERRORS: Tuple[Type[Exception], ...]
+OUT_OF_BOUNDS_TIMEDELTA_ERRORS: tuple[type[Exception], ...]
try:
OUT_OF_BOUNDS_TIMEDELTA_ERRORS = (pd.errors.OutOfBoundsTimedelta, OverflowError)
except AttributeError:
@@ -511,7 +511,7 @@ def contains(self, key):
"""Needed for .loc based partial-string indexing"""
return self.__contains__(key)
- def shift(self, n, freq):
+ def shift(self, n: int | float, freq: str | timedelta):
"""Shift the CFTimeIndex a multiple of the given frequency.
See the documentation for :py:func:`~xarray.cftime_range` for a
@@ -519,7 +519,7 @@ def shift(self, n, freq):
Parameters
----------
- n : int
+ n : int, float if freq of days or below
Periods to shift by
freq : str or datetime.timedelta
A frequency string or datetime.timedelta object to shift by
@@ -541,14 +541,15 @@ def shift(self, n, freq):
>>> index.shift(1, "M")
CFTimeIndex([2000-02-29 00:00:00],
dtype='object', length=1, calendar='standard', freq=None)
+ >>> index.shift(1.5, "D")
+ CFTimeIndex([2000-02-01 12:00:00],
+ dtype='object', length=1, calendar='standard', freq=None)
"""
- from .cftime_offsets import to_offset
-
- if not isinstance(n, int):
- raise TypeError(f"'n' must be an int, got {n}.")
if isinstance(freq, timedelta):
return self + n * freq
elif isinstance(freq, str):
+ from .cftime_offsets import to_offset
+
return self + n * to_offset(freq)
else:
raise TypeError(
| diff --git a/xarray/tests/test_cftime_offsets.py b/xarray/tests/test_cftime_offsets.py
index 4f94b35e3c3..3879959675f 100644
--- a/xarray/tests/test_cftime_offsets.py
+++ b/xarray/tests/test_cftime_offsets.py
@@ -18,6 +18,7 @@
QuarterBegin,
QuarterEnd,
Second,
+ Tick,
YearBegin,
YearEnd,
_days_in_month,
@@ -54,11 +55,25 @@ def calendar(request):
(YearEnd(), 1),
(QuarterBegin(), 1),
(QuarterEnd(), 1),
+ (Tick(), 1),
+ (Day(), 1),
+ (Hour(), 1),
+ (Minute(), 1),
+ (Second(), 1),
+ (Millisecond(), 1),
+ (Microsecond(), 1),
(BaseCFTimeOffset(n=2), 2),
(YearBegin(n=2), 2),
(YearEnd(n=2), 2),
(QuarterBegin(n=2), 2),
(QuarterEnd(n=2), 2),
+ (Tick(n=2), 2),
+ (Day(n=2), 2),
+ (Hour(n=2), 2),
+ (Minute(n=2), 2),
+ (Second(n=2), 2),
+ (Millisecond(n=2), 2),
+ (Microsecond(n=2), 2),
],
ids=_id_func,
)
@@ -74,6 +89,15 @@ def test_cftime_offset_constructor_valid_n(offset, expected_n):
(YearEnd, 1.5),
(QuarterBegin, 1.5),
(QuarterEnd, 1.5),
+ (MonthBegin, 1.5),
+ (MonthEnd, 1.5),
+ (Tick, 1.5),
+ (Day, 1.5),
+ (Hour, 1.5),
+ (Minute, 1.5),
+ (Second, 1.5),
+ (Millisecond, 1.5),
+ (Microsecond, 1.5),
],
ids=_id_func,
)
@@ -359,30 +383,64 @@ def test_eq(a, b):
_MUL_TESTS = [
- (BaseCFTimeOffset(), BaseCFTimeOffset(n=3)),
- (YearEnd(), YearEnd(n=3)),
- (YearBegin(), YearBegin(n=3)),
- (QuarterEnd(), QuarterEnd(n=3)),
- (QuarterBegin(), QuarterBegin(n=3)),
- (MonthEnd(), MonthEnd(n=3)),
- (MonthBegin(), MonthBegin(n=3)),
- (Day(), Day(n=3)),
- (Hour(), Hour(n=3)),
- (Minute(), Minute(n=3)),
- (Second(), Second(n=3)),
- (Millisecond(), Millisecond(n=3)),
- (Microsecond(), Microsecond(n=3)),
+ (BaseCFTimeOffset(), 3, BaseCFTimeOffset(n=3)),
+ (YearEnd(), 3, YearEnd(n=3)),
+ (YearBegin(), 3, YearBegin(n=3)),
+ (QuarterEnd(), 3, QuarterEnd(n=3)),
+ (QuarterBegin(), 3, QuarterBegin(n=3)),
+ (MonthEnd(), 3, MonthEnd(n=3)),
+ (MonthBegin(), 3, MonthBegin(n=3)),
+ (Tick(), 3, Tick(n=3)),
+ (Day(), 3, Day(n=3)),
+ (Hour(), 3, Hour(n=3)),
+ (Minute(), 3, Minute(n=3)),
+ (Second(), 3, Second(n=3)),
+ (Millisecond(), 3, Millisecond(n=3)),
+ (Microsecond(), 3, Microsecond(n=3)),
+ (Day(), 0.5, Hour(n=12)),
+ (Hour(), 0.5, Minute(n=30)),
+ (Minute(), 0.5, Second(n=30)),
+ (Second(), 0.5, Millisecond(n=500)),
+ (Millisecond(), 0.5, Microsecond(n=500)),
]
-@pytest.mark.parametrize(("offset", "expected"), _MUL_TESTS, ids=_id_func)
-def test_mul(offset, expected):
- assert offset * 3 == expected
+@pytest.mark.parametrize(("offset", "multiple", "expected"), _MUL_TESTS, ids=_id_func)
+def test_mul(offset, multiple, expected):
+ assert offset * multiple == expected
-@pytest.mark.parametrize(("offset", "expected"), _MUL_TESTS, ids=_id_func)
-def test_rmul(offset, expected):
- assert 3 * offset == expected
+@pytest.mark.parametrize(("offset", "multiple", "expected"), _MUL_TESTS, ids=_id_func)
+def test_rmul(offset, multiple, expected):
+ assert multiple * offset == expected
+
+
+def test_mul_float_multiple_next_higher_resolution():
+ """Test more than one iteration through _next_higher_resolution is required."""
+ assert 1e-6 * Second() == Microsecond()
+ assert 1e-6 / 60 * Minute() == Microsecond()
+
+
+@pytest.mark.parametrize(
+ "offset",
+ [YearBegin(), YearEnd(), QuarterBegin(), QuarterEnd(), MonthBegin(), MonthEnd()],
+ ids=_id_func,
+)
+def test_nonTick_offset_multiplied_float_error(offset):
+ """Test that the appropriate error is raised if a non-Tick offset is
+ multiplied by a float."""
+ with pytest.raises(TypeError, match="unsupported operand type"):
+ offset * 0.5
+
+
+def test_Microsecond_multiplied_float_error():
+ """Test that the appropriate error is raised if a Tick offset is multiplied
+ by a float which causes it not to be representable by a
+ microsecond-precision timedelta."""
+ with pytest.raises(
+ ValueError, match="Could not convert to integer offset at any resolution"
+ ):
+ Microsecond() * 0.5
@pytest.mark.parametrize(
diff --git a/xarray/tests/test_cftimeindex.py b/xarray/tests/test_cftimeindex.py
index 94f0cf4c2a5..28f0ef499d2 100644
--- a/xarray/tests/test_cftimeindex.py
+++ b/xarray/tests/test_cftimeindex.py
@@ -754,7 +754,7 @@ def test_cftimeindex_add(index):
@requires_cftime
@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
-def test_cftimeindex_add_timedeltaindex(calendar):
+def test_cftimeindex_add_timedeltaindex(calendar) -> None:
a = xr.cftime_range("2000", periods=5, calendar=calendar)
deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)])
result = a + deltas
@@ -763,6 +763,44 @@ def test_cftimeindex_add_timedeltaindex(calendar):
assert isinstance(result, CFTimeIndex)
+@requires_cftime
+@pytest.mark.parametrize("n", [2.0, 1.5])
+@pytest.mark.parametrize(
+ "freq,units",
+ [
+ ("D", "D"),
+ ("H", "H"),
+ ("T", "min"),
+ ("S", "S"),
+ ("L", "ms"),
+ ],
+)
+@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
+def test_cftimeindex_shift_float(n, freq, units, calendar) -> None:
+ a = xr.cftime_range("2000", periods=3, calendar=calendar, freq="D")
+ result = a + pd.Timedelta(n, units)
+ expected = a.shift(n, freq)
+ assert result.equals(expected)
+ assert isinstance(result, CFTimeIndex)
+
+
+@requires_cftime
+def test_cftimeindex_shift_float_us() -> None:
+ a = xr.cftime_range("2000", periods=3, freq="D")
+ with pytest.raises(
+ ValueError, match="Could not convert to integer offset at any resolution"
+ ):
+ a.shift(2.5, "us")
+
+
+@requires_cftime
+@pytest.mark.parametrize("freq", ["AS", "A", "YS", "Y", "QS", "Q", "MS", "M"])
+def test_cftimeindex_shift_float_fails_for_non_tick_freqs(freq) -> None:
+ a = xr.cftime_range("2000", periods=3, freq="D")
+ with pytest.raises(TypeError, match="unsupported operand type"):
+ a.shift(2.5, freq)
+
+
@requires_cftime
def test_cftimeindex_radd(index):
date_type = index.date_type
@@ -780,7 +818,7 @@ def test_cftimeindex_radd(index):
@requires_cftime
@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
-def test_timedeltaindex_add_cftimeindex(calendar):
+def test_timedeltaindex_add_cftimeindex(calendar) -> None:
a = xr.cftime_range("2000", periods=5, calendar=calendar)
deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)])
result = deltas + a
@@ -828,7 +866,7 @@ def test_cftimeindex_sub_timedelta_array(index, other):
@requires_cftime
@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
-def test_cftimeindex_sub_cftimeindex(calendar):
+def test_cftimeindex_sub_cftimeindex(calendar) -> None:
a = xr.cftime_range("2000", periods=5, calendar=calendar)
b = a.shift(2, "D")
result = b - a
@@ -867,7 +905,7 @@ def test_distant_cftime_datetime_sub_cftimeindex(calendar):
@requires_cftime
@pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
-def test_cftimeindex_sub_timedeltaindex(calendar):
+def test_cftimeindex_sub_timedeltaindex(calendar) -> None:
a = xr.cftime_range("2000", periods=5, calendar=calendar)
deltas = pd.TimedeltaIndex([timedelta(days=2) for _ in range(5)])
result = a - deltas
@@ -903,7 +941,7 @@ def test_cftimeindex_rsub(index):
@requires_cftime
@pytest.mark.parametrize("freq", ["D", timedelta(days=1)])
-def test_cftimeindex_shift(index, freq):
+def test_cftimeindex_shift(index, freq) -> None:
date_type = index.date_type
expected_dates = [
date_type(1, 1, 3),
@@ -918,14 +956,14 @@ def test_cftimeindex_shift(index, freq):
@requires_cftime
-def test_cftimeindex_shift_invalid_n():
+def test_cftimeindex_shift_invalid_n() -> None:
index = xr.cftime_range("2000", periods=3)
with pytest.raises(TypeError):
index.shift("a", "D")
@requires_cftime
-def test_cftimeindex_shift_invalid_freq():
+def test_cftimeindex_shift_invalid_freq() -> None:
index = xr.cftime_range("2000", periods=3)
with pytest.raises(TypeError):
index.shift(1, 1)
| diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index a8cd952609c..6c71407cd46 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -19,9 +19,15 @@ What's New
v2022.02.0 (unreleased)
-----------------------
+
New Features
~~~~~~~~~~~~
+- Enabled multiplying tick offsets by floats. Allows ``float`` ``n`` in
+ :py:meth:`CFTimeIndex.shift` if ``shift_freq`` is between ``Day``
+ and ``Microsecond``. (:issue:`6134`, :pull:`6135`).
+ By `Aaron Spring <https://github.com/aaronspring>`_.
+
Breaking changes
~~~~~~~~~~~~~~~~
@@ -42,6 +48,7 @@ Documentation
~~~~~~~~~~~~~
+
Internal Changes
~~~~~~~~~~~~~~~~
@@ -82,6 +89,7 @@ New Features
- Enable the limit option for dask array in the following methods :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`Dataset.ffill` and :py:meth:`Dataset.bfill` (:issue:`6112`)
By `Joseph Nowak <https://github.com/josephnowak>`_.
+
Breaking changes
~~~~~~~~~~~~~~~~
- Rely on matplotlib's default datetime converters instead of pandas' (:issue:`6102`, :pull:`6109`).
| [
{
"components": [
{
"doc": "",
"lines": [
177,
208
],
"name": "Tick",
"signature": "class Tick(BaseCFTimeOffset):",
"type": "class"
},
{
"doc": "",
"lines": [
180,
193
],
"na... | [
"xarray/tests/test_cftime_offsets.py::test_cftime_offset_constructor_valid_n[<BaseCFTimeOffset:",
"xarray/tests/test_cftime_offsets.py::test_cftime_offset_constructor_valid_n[<YearBegin:",
"xarray/tests/test_cftime_offsets.py::test_cftime_offset_constructor_valid_n[<YearEnd:",
"xarray/tests/test_cftime_offset... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Implement multiplication of cftime Tick offsets by floats
<!-- Feel free to remove check-list items aren't relevant to your change -->
- [x] Closes #6134
- [x] Tests added
- [x] User visible changes (including notable bug fixes) are documented in `whats-new.rst`
- [x] ~~New functions/methods are listed in `api.rst`~~
---
- `shift` allows `float` with freq `D`, `H`, `min`, `S`, `ms`
---
Refs:
- https://docs.python.org/3/library/datetime.html
- https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timedelta.html#pandas.Timedelta
- https://xarray.pydata.org/en/stable/generated/xarray.CFTimeIndex.shift.html
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in xarray/coding/cftime_offsets.py]
(definition of Tick:)
class Tick(BaseCFTimeOffset):
(definition of Tick._next_higher_resolution:)
def _next_higher_resolution(self):
(definition of Tick.__mul__:)
def __mul__(self, other):
[end of new definitions in xarray/coding/cftime_offsets.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
[FEATURE]: `CFTimeIndex.shift(float)`
### Is your feature request related to a problem?
`CFTimeIndex.shift()` allows only `int` but sometimes I'd like to shift by a float e.g. 0.5.
For small freqs, that shouldnt be a problem as `pd.Timedelta` allows floats for `days` and below.
For freqs of months and larger, it becomes more tricky. Fractional shifts work for `calendar=360` easily, for other `calendar`s thats not possible.
### Describe the solution you'd like
`CFTimeIndex.shift(0.5, 'D')`
`CFTimeIndex.shift(0.5, 'M')` for 360day calendar
`CFTimeIndex.shift(0.5, 'M')` for other calendars fails
### Describe alternatives you've considered
solution we have in climpred: https://github.com/pangeo-data/climpred/blob/617223b5bea23a094065efe46afeeafe9796fa97/climpred/utils.py#L657
### Additional context
https://xarray.pydata.org/en/stable/generated/xarray.CFTimeIndex.shift.html
----------
For shift intervals that can be represented as timedeltas this seems reasonably straightforward to add. I would hold off for monthly or annual intervals -- even for 360-day calendars, I don't think that non-integer shift factors are very well-defined in that context, since those frequencies involve rounding, e.g. to the beginnings or ends of months:
```
In [2]: times = xr.cftime_range("2000", freq="7D", periods=7)
In [3]: times
Out[3]:
CFTimeIndex([2000-01-01 00:00:00, 2000-01-08 00:00:00, 2000-01-15 00:00:00,
2000-01-22 00:00:00, 2000-01-29 00:00:00, 2000-02-05 00:00:00,
2000-02-12 00:00:00],
dtype='object', length=7, calendar='gregorian', freq='7D')
In [4]: times.shift(2, "M")
Out[4]:
CFTimeIndex([2000-02-29 00:00:00, 2000-02-29 00:00:00, 2000-02-29 00:00:00,
2000-02-29 00:00:00, 2000-02-29 00:00:00, 2000-03-31 00:00:00,
2000-03-31 00:00:00],
dtype='object', length=7, calendar='gregorian', freq='None')
```
--------------------
</issues> | 48290fa14accd3ac87768d3f73d69493b82b0be6 |
scikit-learn__scikit-learn-22118 | 22,118 | scikit-learn/scikit-learn | 1.1 | 142e388fa004e3367fdfc0be4a194be0d0c61c8c | 2022-01-03T04:27:26Z | diff --git a/doc/modules/classes.rst b/doc/modules/classes.rst
index b7000bcf7cbb2..c3e6c4f2f674b 100644
--- a/doc/modules/classes.rst
+++ b/doc/modules/classes.rst
@@ -996,6 +996,8 @@ details.
metrics.mean_tweedie_deviance
metrics.d2_tweedie_score
metrics.mean_pinball_loss
+ metrics.d2_pinball_score
+ metrics.d2_absolute_error_score
Multilabel ranking metrics
--------------------------
diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index a1fce2d3454dc..8468dce3f93c5 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -101,6 +101,9 @@ Scoring Function
'neg_mean_poisson_deviance' :func:`metrics.mean_poisson_deviance`
'neg_mean_gamma_deviance' :func:`metrics.mean_gamma_deviance`
'neg_mean_absolute_percentage_error' :func:`metrics.mean_absolute_percentage_error`
+'d2_absolute_error_score' :func:`metrics.d2_absolute_error_score`
+'d2_pinball_score' :func:`metrics.d2_pinball_score`
+'d2_tweedie_score' :func:`metrics.d2_tweedie_score`
==================================== ============================================== ==================================
@@ -1968,7 +1971,8 @@ The :mod:`sklearn.metrics` module implements several loss, score, and utility
functions to measure regression performance. Some of those have been enhanced
to handle the multioutput case: :func:`mean_squared_error`,
:func:`mean_absolute_error`, :func:`r2_score`,
-:func:`explained_variance_score` and :func:`mean_pinball_loss`.
+:func:`explained_variance_score`, :func:`mean_pinball_loss`, :func:`d2_pinball_score`
+and :func:`d2_absolute_error_score`.
These functions have an ``multioutput`` keyword argument which specifies the
@@ -2370,8 +2374,8 @@ is defined as
\sum_{i=0}^{n_\text{samples} - 1}
\begin{cases}
(y_i-\hat{y}_i)^2, & \text{for }p=0\text{ (Normal)}\\
- 2(y_i \log(y/\hat{y}_i) + \hat{y}_i - y_i), & \text{for}p=1\text{ (Poisson)}\\
- 2(\log(\hat{y}_i/y_i) + y_i/\hat{y}_i - 1), & \text{for}p=2\text{ (Gamma)}\\
+ 2(y_i \log(y/\hat{y}_i) + \hat{y}_i - y_i), & \text{for }p=1\text{ (Poisson)}\\
+ 2(\log(\hat{y}_i/y_i) + y_i/\hat{y}_i - 1), & \text{for }p=2\text{ (Gamma)}\\
2\left(\frac{\max(y_i,0)^{2-p}}{(1-p)(2-p)}-
\frac{y\,\hat{y}^{1-p}_i}{1-p}+\frac{\hat{y}^{2-p}_i}{2-p}\right),
& \text{otherwise}
@@ -2414,34 +2418,6 @@ the difference in errors decreases. Finally, by setting, ``power=2``::
we would get identical errors. The deviance when ``power=2`` is thus only
sensitive to relative errors.
-.. _d2_tweedie_score:
-
-D² score, the coefficient of determination
--------------------------------------------
-
-The :func:`d2_tweedie_score` function computes the percentage of deviance
-explained. It is a generalization of R², where the squared error is replaced by
-the Tweedie deviance. D², also known as McFadden's likelihood ratio index, is
-calculated as
-
-.. math::
-
- D^2(y, \hat{y}) = 1 - \frac{\text{D}(y, \hat{y})}{\text{D}(y, \bar{y})} \,.
-
-The argument ``power`` defines the Tweedie power as for
-:func:`mean_tweedie_deviance`. Note that for `power=0`,
-:func:`d2_tweedie_score` equals :func:`r2_score` (for single targets).
-
-Like R², the best possible score is 1.0 and it can be negative (because the
-model can be arbitrarily worse). A constant model that always predicts the
-expected value of y, disregarding the input features, would get a D² score
-of 0.0.
-
-A scorer object with a specific choice of ``power`` can be built by::
-
- >>> from sklearn.metrics import d2_tweedie_score, make_scorer
- >>> d2_tweedie_score_15 = make_scorer(d2_tweedie_score, power=1.5)
-
.. _pinball_loss:
Pinball loss
@@ -2506,6 +2482,93 @@ explained in the example linked below.
hyper-parameters of quantile regression models on data with non-symmetric
noise and outliers.
+.. _d2_score:
+
+D² score
+--------
+
+The D² score computes the fraction of deviance explained.
+It is a generalization of R², where the squared error is generalized and replaced
+by a deviance of choice :math:`\text{dev}(y, \hat{y})`
+(e.g., Tweedie, pinball or mean absolute error). D² is a form of a *skill score*.
+It is calculated as
+
+.. math::
+
+ D^2(y, \hat{y}) = 1 - \frac{\text{dev}(y, \hat{y})}{\text{dev}(y, y_{\text{null}})} \,.
+
+Where :math:`y_{\text{null}}` is the optimal prediction of an intercept-only model
+(e.g., the mean of `y_true` for the Tweedie case, the median for absolute
+error and the alpha-quantile for pinball loss).
+
+Like R², the best possible score is 1.0 and it can be negative (because the
+model can be arbitrarily worse). A constant model that always predicts
+:math:`y_{\text{null}}`, disregarding the input features, would get a D² score
+of 0.0.
+
+D² Tweedie score
+^^^^^^^^^^^^^^^^
+
+The :func:`d2_tweedie_score` function implements the special case of D²
+where :math:`\text{dev}(y, \hat{y})` is the Tweedie deviance, see :ref:`mean_tweedie_deviance`.
+It is also known as D² Tweedie and is related to McFadden's likelihood ratio index.
+
+The argument ``power`` defines the Tweedie power as for
+:func:`mean_tweedie_deviance`. Note that for `power=0`,
+:func:`d2_tweedie_score` equals :func:`r2_score` (for single targets).
+
+A scorer object with a specific choice of ``power`` can be built by::
+
+ >>> from sklearn.metrics import d2_tweedie_score, make_scorer
+ >>> d2_tweedie_score_15 = make_scorer(d2_tweedie_score, power=1.5)
+
+D² pinball score
+^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`d2_pinball_score` function implements the special case
+of D² with the pinball loss, see :ref:`pinball_loss`, i.e.:
+
+.. math::
+
+ \text{dev}(y, \hat{y}) = \text{pinball}(y, \hat{y}).
+
+The argument ``alpha`` defines the slope of the pinball loss as for
+:func:`mean_pinball_loss` (:ref:`pinball_loss`). It determines the
+quantile level ``alpha`` for which the pinball loss and also D²
+are optimal. Note that for `alpha=0.5` (the default) :func:`d2_pinball_score`
+equals :func:`d2_absolute_error_score`.
+
+A scorer object with a specific choice of ``alpha`` can be built by::
+
+ >>> from sklearn.metrics import d2_pinball_score, make_scorer
+ >>> d2_pinball_score_08 = make_scorer(d2_pinball_score, alpha=0.8)
+
+D² absolute error score
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`d2_absolute_error_score` function implements the special case of
+the :ref:`mean_absolute_error`:
+
+.. math::
+
+ \text{dev}(y, \hat{y}) = \text{MAE}(y, \hat{y}).
+
+Here are some usage examples of the :func:`d2_absolute_error_score` function::
+
+ >>> from sklearn.metrics import d2_absolute_error_score
+ >>> y_true = [3, -0.5, 2, 7]
+ >>> y_pred = [2.5, 0.0, 2, 8]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 0.764...
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [1, 2, 3]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 1.0
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [2, 2, 2]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 0.0
+
.. _clustering_metrics:
diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index b9ab45e1d344a..85053e5a68679 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -630,6 +630,14 @@ Changelog
instead of the finite approximation (`1.0` and `0.0` respectively) currently
returned by default. :pr:`17266` by :user:`Sylvain Marié <smarie>`.
+- |Feature| :func:`d2_pinball_score` and :func:`d2_absolute_error_score`
+ calculate the :math:`D^2` regression score for the pinball loss and the
+ absolute error respectively. :func:`d2_absolute_error_score` is a special case
+ of :func:`d2_pinball_score` with a fixed quantile parameter `alpha=0.5`
+ for ease of use and discovery. The :math:`D^2` scores are generalizations
+ of the `r2_score` and can be interpeted as the fraction of deviance explained.
+ :pr:`22118` by :user:`Ohad Michel <ohadmich>`
+
- |Enhancement| :func:`metrics.top_k_accuracy_score` raises an improved error
message when `y_true` is binary and `y_score` is 2d. :pr:`22284` by `Thomas Fan`_.
diff --git a/sklearn/metrics/__init__.py b/sklearn/metrics/__init__.py
index e4339229c5b64..02ae7d41ebc31 100644
--- a/sklearn/metrics/__init__.py
+++ b/sklearn/metrics/__init__.py
@@ -77,6 +77,8 @@
from ._regression import mean_poisson_deviance
from ._regression import mean_gamma_deviance
from ._regression import d2_tweedie_score
+from ._regression import d2_pinball_score
+from ._regression import d2_absolute_error_score
from ._scorer import check_scoring
@@ -113,6 +115,8 @@
"consensus_score",
"coverage_error",
"d2_tweedie_score",
+ "d2_absolute_error_score",
+ "d2_pinball_score",
"dcg_score",
"davies_bouldin_score",
"DetCurveDisplay",
diff --git a/sklearn/metrics/_regression.py b/sklearn/metrics/_regression.py
index c701320f9c23a..de8aef20aa7c2 100644
--- a/sklearn/metrics/_regression.py
+++ b/sklearn/metrics/_regression.py
@@ -23,6 +23,7 @@
# Ashutosh Hathidara <ashutoshhathidara98@gmail.com>
# Uttam kumar <bajiraouttamsinha@gmail.com>
# Sylvain Marie <sylvain.marie@se.com>
+# Ohad Michel <ohadmich@gmail.com>
# License: BSD 3 clause
import warnings
@@ -54,6 +55,9 @@
"mean_tweedie_deviance",
"mean_poisson_deviance",
"mean_gamma_deviance",
+ "d2_tweedie_score",
+ "d2_pinball_score",
+ "d2_absolute_error_score",
]
@@ -70,6 +74,9 @@ def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
+ dtype : str or list, default="numeric"
+ the dtype argument passed to check_array.
+
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
@@ -87,9 +94,6 @@ def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
-
- dtype : str or list, default="numeric"
- the dtype argument passed to check_array.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
@@ -1102,13 +1106,13 @@ def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
- """D^2 regression score function, percentage of Tweedie deviance explained.
+ """D^2 regression score function, fraction of Tweedie deviance explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical mean of `y_true` as
constant prediction, disregarding the input features, gets a D^2 score of 0.0.
- Read more in the :ref:`User Guide <d2_tweedie_score>`.
+ Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.0
@@ -1203,3 +1207,237 @@ def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
denominator = np.average(dev, weights=sample_weight)
return 1 - numerator / denominator
+
+
+def d2_pinball_score(
+ y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
+):
+ """
+ :math:`D^2` regression score function, fraction of pinball loss explained.
+
+ Best possible score is 1.0 and it can be negative (because the model can be
+ arbitrarily worse). A model that always uses the empirical alpha-quantile of
+ `y_true` as constant prediction, disregarding the input features,
+ gets a :math:`D^2` score of 0.0.
+
+ Read more in the :ref:`User Guide <d2_score>`.
+
+ .. versionadded:: 1.1
+
+ Parameters
+ ----------
+ y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
+ Ground truth (correct) target values.
+
+ y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
+ Estimated target values.
+
+ sample_weight : array-like of shape (n_samples,), optional
+ Sample weights.
+
+ alpha : float, default=0.5
+ Slope of the pinball deviance. It determines the quantile level alpha
+ for which the pinball deviance and also D2 are optimal.
+ The default `alpha=0.5` is equivalent to `d2_absolute_error_score`.
+
+ multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
+ (n_outputs,), default='uniform_average'
+ Defines aggregating of multiple output values.
+ Array-like value defines weights used to average scores.
+
+ 'raw_values' :
+ Returns a full set of errors in case of multioutput input.
+
+ 'uniform_average' :
+ Scores of all outputs are averaged with uniform weight.
+
+ Returns
+ -------
+ score : float or ndarray of floats
+ The :math:`D^2` score with a pinball deviance
+ or ndarray of scores if `multioutput='raw_values'`.
+
+ Notes
+ -----
+ Like :math:`R^2`, :math:`D^2` score may be negative
+ (it need not actually be the square of a quantity D).
+
+ This metric is not well-defined for a single point and will return a NaN
+ value if n_samples is less than two.
+
+ References
+ ----------
+ .. [1] Eq. (7) of `Koenker, Roger; Machado, José A. F. (1999).
+ "Goodness of Fit and Related Inference Processes for Quantile Regression"
+ <http://dx.doi.org/10.1080/01621459.1999.10473882>`_
+ .. [2] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
+ Wainwright. "Statistical Learning with Sparsity: The Lasso and
+ Generalizations." (2015). https://trevorhastie.github.io
+
+ Examples
+ --------
+ >>> from sklearn.metrics import d2_pinball_score
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [1, 3, 3]
+ >>> d2_pinball_score(y_true, y_pred)
+ 0.5
+ >>> d2_pinball_score(y_true, y_pred, alpha=0.9)
+ 0.772...
+ >>> d2_pinball_score(y_true, y_pred, alpha=0.1)
+ -1.045...
+ >>> d2_pinball_score(y_true, y_true, alpha=0.1)
+ 1.0
+ """
+ y_type, y_true, y_pred, multioutput = _check_reg_targets(
+ y_true, y_pred, multioutput
+ )
+ check_consistent_length(y_true, y_pred, sample_weight)
+
+ if _num_samples(y_pred) < 2:
+ msg = "D^2 score is not well-defined with less than two samples."
+ warnings.warn(msg, UndefinedMetricWarning)
+ return float("nan")
+
+ numerator = mean_pinball_loss(
+ y_true,
+ y_pred,
+ sample_weight=sample_weight,
+ alpha=alpha,
+ multioutput="raw_values",
+ )
+
+ if sample_weight is None:
+ y_quantile = np.tile(
+ np.percentile(y_true, q=alpha * 100, axis=0), (len(y_true), 1)
+ )
+ else:
+ sample_weight = _check_sample_weight(sample_weight, y_true)
+ y_quantile = np.tile(
+ _weighted_percentile(
+ y_true, sample_weight=sample_weight, percentile=alpha * 100
+ ),
+ (len(y_true), 1),
+ )
+
+ denominator = mean_pinball_loss(
+ y_true,
+ y_quantile,
+ sample_weight=sample_weight,
+ alpha=alpha,
+ multioutput="raw_values",
+ )
+
+ nonzero_numerator = numerator != 0
+ nonzero_denominator = denominator != 0
+ valid_score = nonzero_numerator & nonzero_denominator
+ output_scores = np.ones(y_true.shape[1])
+
+ output_scores[valid_score] = 1 - (numerator[valid_score] / denominator[valid_score])
+ output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0
+
+ if isinstance(multioutput, str):
+ if multioutput == "raw_values":
+ # return scores individually
+ return output_scores
+ elif multioutput == "uniform_average":
+ # passing None as weights to np.average results in uniform mean
+ avg_weights = None
+ else:
+ raise ValueError(
+ "multioutput is expected to be 'raw_values' "
+ "or 'uniform_average' but we got %r"
+ " instead." % multioutput
+ )
+ else:
+ avg_weights = multioutput
+
+ return np.average(output_scores, weights=avg_weights)
+
+
+def d2_absolute_error_score(
+ y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
+):
+ """
+ :math:`D^2` regression score function, \
+ fraction of absolute error explained.
+
+ Best possible score is 1.0 and it can be negative (because the model can be
+ arbitrarily worse). A model that always uses the empirical median of `y_true`
+ as constant prediction, disregarding the input features,
+ gets a :math:`D^2` score of 0.0.
+
+ Read more in the :ref:`User Guide <d2_score>`.
+
+ .. versionadded:: 1.1
+
+ Parameters
+ ----------
+ y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
+ Ground truth (correct) target values.
+
+ y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
+ Estimated target values.
+
+ sample_weight : array-like of shape (n_samples,), optional
+ Sample weights.
+
+ multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
+ (n_outputs,), default='uniform_average'
+ Defines aggregating of multiple output values.
+ Array-like value defines weights used to average scores.
+
+ 'raw_values' :
+ Returns a full set of errors in case of multioutput input.
+
+ 'uniform_average' :
+ Scores of all outputs are averaged with uniform weight.
+
+ Returns
+ -------
+ score : float or ndarray of floats
+ The :math:`D^2` score with an absolute error deviance
+ or ndarray of scores if 'multioutput' is 'raw_values'.
+
+ Notes
+ -----
+ Like :math:`R^2`, :math:`D^2` score may be negative
+ (it need not actually be the square of a quantity D).
+
+ This metric is not well-defined for single samples and will return a NaN
+ value if n_samples is less than two.
+
+ References
+ ----------
+ .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
+ Wainwright. "Statistical Learning with Sparsity: The Lasso and
+ Generalizations." (2015). https://trevorhastie.github.io
+
+ Examples
+ --------
+ >>> from sklearn.metrics import d2_absolute_error_score
+ >>> y_true = [3, -0.5, 2, 7]
+ >>> y_pred = [2.5, 0.0, 2, 8]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 0.764...
+ >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
+ >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
+ >>> d2_absolute_error_score(y_true, y_pred, multioutput='uniform_average')
+ 0.691...
+ >>> d2_absolute_error_score(y_true, y_pred, multioutput='raw_values')
+ array([0.8125 , 0.57142857])
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [1, 2, 3]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 1.0
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [2, 2, 2]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 0.0
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [3, 2, 1]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ -1.0
+ """
+ return d2_pinball_score(
+ y_true, y_pred, sample_weight=sample_weight, alpha=0.5, multioutput=multioutput
+ )
| diff --git a/sklearn/metrics/tests/test_common.py b/sklearn/metrics/tests/test_common.py
index dfd43ef34096f..1e627f9f86676 100644
--- a/sklearn/metrics/tests/test_common.py
+++ b/sklearn/metrics/tests/test_common.py
@@ -30,6 +30,8 @@
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import d2_tweedie_score
+from sklearn.metrics import d2_pinball_score
+from sklearn.metrics import d2_absolute_error_score
from sklearn.metrics import det_curve
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
@@ -112,6 +114,8 @@
"mean_gamma_deviance": mean_gamma_deviance,
"mean_compound_poisson_deviance": partial(mean_tweedie_deviance, power=1.4),
"d2_tweedie_score": partial(d2_tweedie_score, power=1.4),
+ "d2_pinball_score": d2_pinball_score,
+ "d2_absolute_error_score": d2_absolute_error_score,
}
CLASSIFICATION_METRICS = {
@@ -446,6 +450,8 @@ def precision_recall_curve_padded_thresholds(*args, **kwargs):
"explained_variance_score",
"mean_absolute_percentage_error",
"mean_pinball_loss",
+ "d2_pinball_score",
+ "d2_absolute_error_score",
}
# Symmetric with respect to their input arguments y_true and y_pred
@@ -513,6 +519,8 @@ def precision_recall_curve_padded_thresholds(*args, **kwargs):
"mean_poisson_deviance",
"mean_compound_poisson_deviance",
"d2_tweedie_score",
+ "d2_pinball_score",
+ "d2_absolute_error_score",
"mean_absolute_percentage_error",
}
diff --git a/sklearn/metrics/tests/test_regression.py b/sklearn/metrics/tests/test_regression.py
index f5ffc648ed21b..a95125493047b 100644
--- a/sklearn/metrics/tests/test_regression.py
+++ b/sklearn/metrics/tests/test_regression.py
@@ -22,6 +22,8 @@
from sklearn.metrics import r2_score
from sklearn.metrics import mean_tweedie_deviance
from sklearn.metrics import d2_tweedie_score
+from sklearn.metrics import d2_pinball_score
+from sklearn.metrics import d2_absolute_error_score
from sklearn.metrics import make_scorer
from sklearn.metrics._regression import _check_reg_targets
@@ -62,6 +64,26 @@ def test_regression_metrics(n_samples=50):
assert_almost_equal(
d2_tweedie_score(y_true, y_pred, power=0), r2_score(y_true, y_pred)
)
+ dev_median = np.abs(y_true - np.median(y_true)).sum()
+ assert_array_almost_equal(
+ d2_absolute_error_score(y_true, y_pred),
+ 1 - np.abs(y_true - y_pred).sum() / dev_median,
+ )
+ alpha = 0.2
+ pinball_loss = lambda y_true, y_pred, alpha: alpha * np.maximum(
+ y_true - y_pred, 0
+ ) + (1 - alpha) * np.maximum(y_pred - y_true, 0)
+ y_quantile = np.percentile(y_true, q=alpha * 100)
+ assert_almost_equal(
+ d2_pinball_score(y_true, y_pred, alpha=alpha),
+ 1
+ - pinball_loss(y_true, y_pred, alpha).sum()
+ / pinball_loss(y_true, y_quantile, alpha).sum(),
+ )
+ assert_almost_equal(
+ d2_absolute_error_score(y_true, y_pred),
+ d2_pinball_score(y_true, y_pred, alpha=0.5),
+ )
# Tweedie deviance needs positive y_pred, except for p=0,
# p>=2 needs positive y_true
@@ -139,6 +161,20 @@ def test_multioutput_regression():
error = r2_score(y_true, y_pred, multioutput="uniform_average")
assert_almost_equal(error, -0.875)
+ score = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values")
+ raw_expected_score = [
+ 1
+ - np.abs(y_true[:, i] - y_pred[:, i]).sum()
+ / np.abs(y_true[:, i] - np.median(y_true[:, i])).sum()
+ for i in range(y_true.shape[1])
+ ]
+ # in the last case, the denominator vanishes and hence we get nan,
+    # but since the numerator vanishes as well the expected score is 1.0
+ raw_expected_score = np.where(np.isnan(raw_expected_score), 1, raw_expected_score)
+ assert_array_almost_equal(score, raw_expected_score)
+
+ score = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="uniform_average")
+ assert_almost_equal(score, raw_expected_score.mean())
# constant `y_true` with force_finite=True leads to 1. or 0.
yc = [5.0, 5.0]
error = r2_score(yc, [5.0, 5.0], multioutput="variance_weighted")
@@ -192,6 +228,7 @@ def test_regression_metrics_at_limits():
# Perfect cases
assert_almost_equal(r2_score([0.0, 1], [0.0, 1]), 1.0)
+ assert_almost_equal(d2_pinball_score([0.0, 1], [0.0, 1]), 1.0)
# Non-finite cases
# R² and explained variance have a fix by default for non-finite cases
@@ -319,10 +356,15 @@ def test_regression_multioutput_array():
)
with pytest.raises(ValueError, match=err_msg):
mean_pinball_loss(y_true, y_pred, multioutput="variance_weighted")
+
+ with pytest.raises(ValueError, match=err_msg):
+ d2_pinball_score(y_true, y_pred, multioutput="variance_weighted")
+
pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
mape = mean_absolute_percentage_error(y_true, y_pred, multioutput="raw_values")
r = r2_score(y_true, y_pred, multioutput="raw_values")
evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
+ d2ps = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values")
evs2 = explained_variance_score(
y_true, y_pred, multioutput="raw_values", force_finite=False
)
@@ -333,6 +375,7 @@ def test_regression_multioutput_array():
assert_array_almost_equal(mape, [0.0778, 0.2262], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
+ assert_array_almost_equal(d2ps, [0.833, 0.722], decimal=2)
assert_array_almost_equal(evs2, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
@@ -343,10 +386,12 @@ def test_regression_multioutput_array():
mae = mean_absolute_error(y_true, y_pred, multioutput="raw_values")
pbl = mean_pinball_loss(y_true, y_pred, multioutput="raw_values")
r = r2_score(y_true, y_pred, multioutput="raw_values")
+ d2ps = d2_pinball_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(mse, [1.0, 1.0], decimal=2)
assert_array_almost_equal(mae, [1.0, 1.0], decimal=2)
assert_array_almost_equal(pbl, [0.5, 0.5], decimal=2)
assert_array_almost_equal(r, [0.0, 0.0], decimal=2)
+ assert_array_almost_equal(d2ps, [0.0, 0.0], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput="raw_values")
assert_array_almost_equal(r, [0, -3.5], decimal=2)
@@ -382,6 +427,8 @@ def test_regression_multioutput_array():
evs = explained_variance_score(y_true, y_pred, multioutput="raw_values")
assert_array_almost_equal(evs, [1.0, -3.0], decimal=2)
assert np.mean(evs) == explained_variance_score(y_true, y_pred)
+ d2ps = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput="raw_values")
+ assert_array_almost_equal(d2ps, [1.0, -1.0], decimal=2)
evs2 = explained_variance_score(
y_true, y_pred, multioutput="raw_values", force_finite=False
)
@@ -410,6 +457,7 @@ def test_regression_custom_weights():
mapew = mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
+ d2psw = d2_pinball_score(y_true, y_pred, alpha=0.5, multioutput=[0.4, 0.6])
evsw2 = explained_variance_score(
y_true, y_pred, multioutput=[0.4, 0.6], force_finite=False
)
@@ -420,6 +468,7 @@ def test_regression_custom_weights():
assert_almost_equal(mapew, 0.1668, decimal=2)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
+ assert_almost_equal(d2psw, 0.766, decimal=2)
assert_almost_equal(evsw2, 0.94, decimal=2)
# Handling msle separately as it does not accept negative inputs.
@@ -432,7 +481,7 @@ def test_regression_custom_weights():
assert_almost_equal(msle, msle2, decimal=2)
-@pytest.mark.parametrize("metric", [r2_score, d2_tweedie_score])
+@pytest.mark.parametrize("metric", [r2_score, d2_tweedie_score, d2_pinball_score])
def test_regression_single_sample(metric):
y_true = [0]
y_pred = [1]
| diff --git a/doc/modules/classes.rst b/doc/modules/classes.rst
index b7000bcf7cbb2..c3e6c4f2f674b 100644
--- a/doc/modules/classes.rst
+++ b/doc/modules/classes.rst
@@ -996,6 +996,8 @@ details.
metrics.mean_tweedie_deviance
metrics.d2_tweedie_score
metrics.mean_pinball_loss
+ metrics.d2_pinball_score
+ metrics.d2_absolute_error_score
Multilabel ranking metrics
--------------------------
diff --git a/doc/modules/model_evaluation.rst b/doc/modules/model_evaluation.rst
index a1fce2d3454dc..8468dce3f93c5 100644
--- a/doc/modules/model_evaluation.rst
+++ b/doc/modules/model_evaluation.rst
@@ -101,6 +101,9 @@ Scoring Function
'neg_mean_poisson_deviance' :func:`metrics.mean_poisson_deviance`
'neg_mean_gamma_deviance' :func:`metrics.mean_gamma_deviance`
'neg_mean_absolute_percentage_error' :func:`metrics.mean_absolute_percentage_error`
+'d2_absolute_error_score' :func:`metrics.d2_absolute_error_score`
+'d2_pinball_score' :func:`metrics.d2_pinball_score`
+'d2_tweedie_score' :func:`metrics.d2_tweedie_score`
==================================== ============================================== ==================================
@@ -1968,7 +1971,8 @@ The :mod:`sklearn.metrics` module implements several loss, score, and utility
functions to measure regression performance. Some of those have been enhanced
to handle the multioutput case: :func:`mean_squared_error`,
:func:`mean_absolute_error`, :func:`r2_score`,
-:func:`explained_variance_score` and :func:`mean_pinball_loss`.
+:func:`explained_variance_score`, :func:`mean_pinball_loss`, :func:`d2_pinball_score`
+and :func:`d2_absolute_error_score`.
These functions have an ``multioutput`` keyword argument which specifies the
@@ -2370,8 +2374,8 @@ is defined as
\sum_{i=0}^{n_\text{samples} - 1}
\begin{cases}
(y_i-\hat{y}_i)^2, & \text{for }p=0\text{ (Normal)}\\
- 2(y_i \log(y/\hat{y}_i) + \hat{y}_i - y_i), & \text{for}p=1\text{ (Poisson)}\\
- 2(\log(\hat{y}_i/y_i) + y_i/\hat{y}_i - 1), & \text{for}p=2\text{ (Gamma)}\\
+ 2(y_i \log(y/\hat{y}_i) + \hat{y}_i - y_i), & \text{for }p=1\text{ (Poisson)}\\
+ 2(\log(\hat{y}_i/y_i) + y_i/\hat{y}_i - 1), & \text{for }p=2\text{ (Gamma)}\\
2\left(\frac{\max(y_i,0)^{2-p}}{(1-p)(2-p)}-
\frac{y\,\hat{y}^{1-p}_i}{1-p}+\frac{\hat{y}^{2-p}_i}{2-p}\right),
& \text{otherwise}
@@ -2414,34 +2418,6 @@ the difference in errors decreases. Finally, by setting, ``power=2``::
we would get identical errors. The deviance when ``power=2`` is thus only
sensitive to relative errors.
-.. _d2_tweedie_score:
-
-D² score, the coefficient of determination
--------------------------------------------
-
-The :func:`d2_tweedie_score` function computes the percentage of deviance
-explained. It is a generalization of R², where the squared error is replaced by
-the Tweedie deviance. D², also known as McFadden's likelihood ratio index, is
-calculated as
-
-.. math::
-
- D^2(y, \hat{y}) = 1 - \frac{\text{D}(y, \hat{y})}{\text{D}(y, \bar{y})} \,.
-
-The argument ``power`` defines the Tweedie power as for
-:func:`mean_tweedie_deviance`. Note that for `power=0`,
-:func:`d2_tweedie_score` equals :func:`r2_score` (for single targets).
-
-Like R², the best possible score is 1.0 and it can be negative (because the
-model can be arbitrarily worse). A constant model that always predicts the
-expected value of y, disregarding the input features, would get a D² score
-of 0.0.
-
-A scorer object with a specific choice of ``power`` can be built by::
-
- >>> from sklearn.metrics import d2_tweedie_score, make_scorer
- >>> d2_tweedie_score_15 = make_scorer(d2_tweedie_score, power=1.5)
-
.. _pinball_loss:
Pinball loss
@@ -2506,6 +2482,93 @@ explained in the example linked below.
hyper-parameters of quantile regression models on data with non-symmetric
noise and outliers.
+.. _d2_score:
+
+D² score
+--------
+
+The D² score computes the fraction of deviance explained.
+It is a generalization of R², where the squared error is generalized and replaced
+by a deviance of choice :math:`\text{dev}(y, \hat{y})`
+(e.g., Tweedie, pinball or mean absolute error). D² is a form of a *skill score*.
+It is calculated as
+
+.. math::
+
+ D^2(y, \hat{y}) = 1 - \frac{\text{dev}(y, \hat{y})}{\text{dev}(y, y_{\text{null}})} \,.
+
+Where :math:`y_{\text{null}}` is the optimal prediction of an intercept-only model
+(e.g., the mean of `y_true` for the Tweedie case, the median for absolute
+error and the alpha-quantile for pinball loss).
+
+Like R², the best possible score is 1.0 and it can be negative (because the
+model can be arbitrarily worse). A constant model that always predicts
+:math:`y_{\text{null}}`, disregarding the input features, would get a D² score
+of 0.0.
+
+D² Tweedie score
+^^^^^^^^^^^^^^^^
+
+The :func:`d2_tweedie_score` function implements the special case of D²
+where :math:`\text{dev}(y, \hat{y})` is the Tweedie deviance, see :ref:`mean_tweedie_deviance`.
+It is also known as D² Tweedie and is related to McFadden's likelihood ratio index.
+
+The argument ``power`` defines the Tweedie power as for
+:func:`mean_tweedie_deviance`. Note that for `power=0`,
+:func:`d2_tweedie_score` equals :func:`r2_score` (for single targets).
+
+A scorer object with a specific choice of ``power`` can be built by::
+
+ >>> from sklearn.metrics import d2_tweedie_score, make_scorer
+ >>> d2_tweedie_score_15 = make_scorer(d2_tweedie_score, power=1.5)
+
+D² pinball score
+^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`d2_pinball_score` function implements the special case
+of D² with the pinball loss, see :ref:`pinball_loss`, i.e.:
+
+.. math::
+
+ \text{dev}(y, \hat{y}) = \text{pinball}(y, \hat{y}).
+
+The argument ``alpha`` defines the slope of the pinball loss as for
+:func:`mean_pinball_loss` (:ref:`pinball_loss`). It determines the
+quantile level ``alpha`` for which the pinball loss and also D²
+are optimal. Note that for `alpha=0.5` (the default) :func:`d2_pinball_score`
+equals :func:`d2_absolute_error_score`.
+
+A scorer object with a specific choice of ``alpha`` can be built by::
+
+ >>> from sklearn.metrics import d2_pinball_score, make_scorer
+ >>> d2_pinball_score_08 = make_scorer(d2_pinball_score, alpha=0.8)
+
+D² absolute error score
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`d2_absolute_error_score` function implements the special case of
+the :ref:`mean_absolute_error`:
+
+.. math::
+
+ \text{dev}(y, \hat{y}) = \text{MAE}(y, \hat{y}).
+
+Here are some usage examples of the :func:`d2_absolute_error_score` function::
+
+ >>> from sklearn.metrics import d2_absolute_error_score
+ >>> y_true = [3, -0.5, 2, 7]
+ >>> y_pred = [2.5, 0.0, 2, 8]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 0.764...
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [1, 2, 3]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 1.0
+ >>> y_true = [1, 2, 3]
+ >>> y_pred = [2, 2, 2]
+ >>> d2_absolute_error_score(y_true, y_pred)
+ 0.0
+
.. _clustering_metrics:
diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index b9ab45e1d344a..85053e5a68679 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -630,6 +630,14 @@ Changelog
instead of the finite approximation (`1.0` and `0.0` respectively) currently
returned by default. :pr:`17266` by :user:`Sylvain Marié <smarie>`.
+- |Feature| :func:`d2_pinball_score` and :func:`d2_absolute_error_score`
+ calculate the :math:`D^2` regression score for the pinball loss and the
+ absolute error respectively. :func:`d2_absolute_error_score` is a special case
+ of :func:`d2_pinball_score` with a fixed quantile parameter `alpha=0.5`
+ for ease of use and discovery. The :math:`D^2` scores are generalizations
+  of the `r2_score` and can be interpreted as the fraction of deviance explained.
+ :pr:`22118` by :user:`Ohad Michel <ohadmich>`
+
- |Enhancement| :func:`metrics.top_k_accuracy_score` raises an improved error
message when `y_true` is binary and `y_score` is 2d. :pr:`22284` by `Thomas Fan`_.
| [
{
"components": [
{
"doc": ":math:`D^2` regression score function, fraction of pinball loss explained.\n\nBest possible score is 1.0 and it can be negative (because the model can be\narbitrarily worse). A model that always uses the empirical alpha-quantile of\n`y_true` as constant prediction, disr... | [
"sklearn/metrics/tests/test_common.py::test_symmetry_consistency",
"sklearn/metrics/tests/test_common.py::test_symmetric_metric[accuracy_score]",
"sklearn/metrics/tests/test_common.py::test_symmetric_metric[cohen_kappa_score]",
"sklearn/metrics/tests/test_common.py::test_symmetric_metric[f1_score]",
"sklear... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH add D2 pinball score and D2 absolute error score
<!--
Thanks for contributing a pull request! Please ensure you have taken a look at
the contribution guidelines: https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md
-->
#### Reference Issues/PRs
<!--
Example: Fixes #1234. See also #3456.
Please use keywords (e.g., Fixes) to create link to the issues or pull requests
you resolved, so that they will automatically be closed when your pull request
is merged. See https://github.com/blog/1506-closing-issues-via-pull-requests
-->
This PR is related to issue #20943.
Merging this PR won't fully resolve the issue above as it only implements
part of the requested improvements.
#### What does this implement/fix? Explain your changes.
This PR implements `d2_pinball_score` and `d2_absolute_error_score` as requested by @lorentzenchr .
See also #20943.
#### Any other comments?
I implemented the score function and updated the docstrings based on
my best understanding, but please let me know if I misunderstood something
and I'll be happy to make improvements. Thanks!
<!--
Please be aware that we are a loose team of volunteers so patience is
necessary; assistance handling other issues is very welcome. We value
all user contributions, no matter how minor they are. If we are slow to
review, either the pull request needs some benchmarking, tinkering,
convincing, etc. or more likely the reviewers are simply busy. In either
case, we ask for your understanding during the review process.
For more information, see our FAQ on this topic:
http://scikit-learn.org/dev/faq.html#why-is-my-pull-request-not-getting-any-attention.
Thanks for contributing!
-->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/metrics/_regression.py]
(definition of d2_pinball_score:)
def d2_pinball_score( y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average" ):
""":math:`D^2` regression score function, fraction of pinball loss explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical alpha-quantile of
`y_true` as constant prediction, disregarding the input features,
gets a :math:`D^2` score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.1
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), optional
Sample weights.
alpha : float, default=0.5
Slope of the pinball deviance. It determines the quantile level alpha
for which the pinball deviance and also D2 are optimal.
The default `alpha=0.5` is equivalent to `d2_absolute_error_score`.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
Returns
-------
score : float or ndarray of floats
The :math:`D^2` score with a pinball deviance
or ndarray of scores if `multioutput='raw_values'`.
Notes
-----
Like :math:`R^2`, :math:`D^2` score may be negative
(it need not actually be the square of a quantity D).
This metric is not well-defined for a single point and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (7) of `Koenker, Roger; Machado, José A. F. (1999).
"Goodness of Fit and Related Inference Processes for Quantile Regression"
<http://dx.doi.org/10.1080/01621459.1999.10473882>`_
.. [2] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://trevorhastie.github.io
Examples
--------
>>> from sklearn.metrics import d2_pinball_score
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 3, 3]
>>> d2_pinball_score(y_true, y_pred)
0.5
>>> d2_pinball_score(y_true, y_pred, alpha=0.9)
0.772...
>>> d2_pinball_score(y_true, y_pred, alpha=0.1)
-1.045...
>>> d2_pinball_score(y_true, y_true, alpha=0.1)
1.0"""
(definition of d2_absolute_error_score:)
def d2_absolute_error_score( y_true, y_pred, *, sample_weight=None, multioutput="uniform_average" ):
""":math:`D^2` regression score function, fraction of absolute error explained.
Best possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A model that always uses the empirical median of `y_true`
as constant prediction, disregarding the input features,
gets a :math:`D^2` score of 0.0.
Read more in the :ref:`User Guide <d2_score>`.
.. versionadded:: 1.1
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), optional
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape (n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
Returns
-------
score : float or ndarray of floats
The :math:`D^2` score with an absolute error deviance
or ndarray of scores if 'multioutput' is 'raw_values'.
Notes
-----
Like :math:`R^2`, :math:`D^2` score may be negative
(it need not actually be the square of a quantity D).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
References
----------
.. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
Wainwright. "Statistical Learning with Sparsity: The Lasso and
Generalizations." (2015). https://trevorhastie.github.io
Examples
--------
>>> from sklearn.metrics import d2_absolute_error_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> d2_absolute_error_score(y_true, y_pred)
0.764...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> d2_absolute_error_score(y_true, y_pred, multioutput='uniform_average')
0.691...
>>> d2_absolute_error_score(y_true, y_pred, multioutput='raw_values')
array([0.8125 , 0.57142857])
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 2, 3]
>>> d2_absolute_error_score(y_true, y_pred)
1.0
>>> y_true = [1, 2, 3]
>>> y_pred = [2, 2, 2]
>>> d2_absolute_error_score(y_true, y_pred)
0.0
>>> y_true = [1, 2, 3]
>>> y_pred = [3, 2, 1]
>>> d2_absolute_error_score(y_true, y_pred)
-1.0"""
[end of new definitions in sklearn/metrics/_regression.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 38ff5be25d0164bf9598bcfdde3b791ad6e261b0 | |
RDFLib__rdflib-1624 | 1,624 | RDFLib/rdflib | null | 292a6bcb739ae21a30cdc9a926e74744b943643f | 2021-12-28T13:42:12Z | diff --git a/rdflib/plugins/sparql/__init__.py b/rdflib/plugins/sparql/__init__.py
index eb9230bf1..97d8c56aa 100644
--- a/rdflib/plugins/sparql/__init__.py
+++ b/rdflib/plugins/sparql/__init__.py
@@ -35,7 +35,7 @@
from . import operators
from . import parserutils
-from .processor import prepareQuery, processUpdate
+from .processor import prepareQuery, prepareUpdate, processUpdate
assert parser
assert operators
diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py
index b3d1a3bae..805e62a80 100644
--- a/rdflib/plugins/sparql/algebra.py
+++ b/rdflib/plugins/sparql/algebra.py
@@ -13,7 +13,7 @@
from rdflib import Literal, Variable, URIRef, BNode
-from rdflib.plugins.sparql.sparql import Prologue, Query
+from rdflib.plugins.sparql.sparql import Prologue, Query, Update
from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.operators import (
and_,
@@ -761,7 +761,7 @@ def translateUpdate(q, base=None, initNs=None):
res.append(translateUpdate1(u, prologue))
- return res
+ return Update(prologue, res)
def translateQuery(q, base=None, initNs=None):
diff --git a/rdflib/plugins/sparql/processor.py b/rdflib/plugins/sparql/processor.py
index 9f463a779..a0419423f 100644
--- a/rdflib/plugins/sparql/processor.py
+++ b/rdflib/plugins/sparql/processor.py
@@ -26,6 +26,15 @@ def prepareQuery(queryString, initNs={}, base=None) -> Query:
return ret
+def prepareUpdate(updateString, initNs={}, base=None):
+ """
+ Parse and translate a SPARQL Update
+ """
+ ret = translateUpdate(parseUpdate(updateString), base, initNs)
+ ret._original_args = (updateString, initNs, base)
+ return ret
+
+
def processUpdate(graph, updateString, initBindings={}, initNs={}, base=None):
"""
Process a SPARQL Update Request
diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py
index eedc9e746..526203c47 100644
--- a/rdflib/plugins/sparql/sparql.py
+++ b/rdflib/plugins/sparql/sparql.py
@@ -408,3 +408,13 @@ class Query:
def __init__(self, prologue, algebra):
self.prologue = prologue
self.algebra = algebra
+
+
+class Update:
+ """
+ A parsed and translated update
+ """
+
+ def __init__(self, prologue, algebra):
+ self.prologue = prologue
+ self.algebra = algebra
diff --git a/rdflib/plugins/sparql/update.py b/rdflib/plugins/sparql/update.py
index f979c3872..ef4b46b95 100644
--- a/rdflib/plugins/sparql/update.py
+++ b/rdflib/plugins/sparql/update.py
@@ -273,7 +273,7 @@ def evalUpdate(graph, update, initBindings={}):
"""
- for u in update:
+ for u in update.algebra:
initBindings = dict((Variable(k), v) for k, v in initBindings.items())
| diff --git a/test/test_sparql_prepare.py b/test/test_sparql_prepare.py
new file mode 100644
index 000000000..3a0f51324
--- /dev/null
+++ b/test/test_sparql_prepare.py
@@ -0,0 +1,43 @@
+import os
+from rdflib.plugins.sparql import prepareUpdate, prepareQuery
+from rdflib.namespace import FOAF
+from rdflib import (
+ Graph,
+ URIRef,
+)
+
+
+def test_prepare_update():
+
+ q = prepareUpdate(
+ """\
+PREFIX dc: <http://purl.org/dc/elements/1.1/>
+INSERT DATA
+{ <http://example/book3> dc:title "A new book" ;
+ dc:creator "A.N.Other" .
+ } ;
+""",
+ initNs={},
+ )
+
+ g = Graph()
+ g.update(q, initBindings={})
+ assert len(g) == 2
+
+
+def test_prepare_query():
+
+ q = prepareQuery(
+ "SELECT ?name WHERE { ?person foaf:knows/foaf:name ?name . }",
+ initNs={"foaf": FOAF},
+ )
+
+ g = Graph()
+ g.parse(
+ location=os.path.join(os.path.dirname(__file__), "..", "examples", "foaf.n3"),
+ format="n3",
+ )
+
+ tim = URIRef("http://www.w3.org/People/Berners-Lee/card#i")
+
+ assert len(list(g.query(q, initBindings={"person": tim}))) == 50
| [
{
"components": [
{
"doc": "Parse and translate a SPARQL Update",
"lines": [
29,
35
],
"name": "prepareUpdate",
"signature": "def prepareUpdate(updateString, initNs={}, base=None):",
"type": "function"
}
],
"file": "rdflib/p... | [
"test/test_sparql_prepare.py::test_prepare_update",
"test/test_sparql_prepare.py::test_prepare_query"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Feature prepareupdate
Fixes #272
Slightly speculative, other option is close #272 with `wontfix`
## Proposed Changes
Basically, follow the pattern established in `prepareQuery`
- Create an Update class
- Add a `prepareUpdate` to `processor.py`
- Propagate pattern
- Add test.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rdflib/plugins/sparql/processor.py]
(definition of prepareUpdate:)
def prepareUpdate(updateString, initNs={}, base=None):
"""Parse and translate a SPARQL Update"""
[end of new definitions in rdflib/plugins/sparql/processor.py]
[start of new definitions in rdflib/plugins/sparql/sparql.py]
(definition of Update:)
class Update:
"""A parsed and translated update"""
(definition of Update.__init__:)
def __init__(self, prologue, algebra):
[end of new definitions in rdflib/plugins/sparql/sparql.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
prepareUpdate method for pre-parsing SPARQL update statements
i.e. matching prepareQuery
----------
--------------------
</issues> | 0c11debb5178157baeac27b735e49a757916d2a6 | |
boto__botocore-2579 | 2,579 | boto/botocore | null | 63107d6f1fbf8a53b23dbf19f49ff849501c8ffd | 2021-12-21T21:23:56Z | diff --git a/.changes/next-release/enhancement-JSONFileCache-35511.json b/.changes/next-release/enhancement-JSONFileCache-35511.json
new file mode 100644
index 0000000000..115af241fa
--- /dev/null
+++ b/.changes/next-release/enhancement-JSONFileCache-35511.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "JSONFileCache",
+ "description": "Add support for __delitem__ in JSONFileCache"
+}
diff --git a/botocore/credentials.py b/botocore/credentials.py
index 47a7b47161..a8fc99eb7c 100644
--- a/botocore/credentials.py
+++ b/botocore/credentials.py
@@ -22,6 +22,7 @@
from collections import namedtuple
from copy import deepcopy
from hashlib import sha1
+from pathlib import Path
from dateutil.parser import parse
from dateutil.tz import tzlocal, tzutc
@@ -307,6 +308,14 @@ def __getitem__(self, cache_key):
except (OSError, ValueError, IOError):
raise KeyError(cache_key)
+ def __delitem__(self, cache_key):
+ actual_key = self._convert_cache_key(cache_key)
+ try:
+ key_path = Path(actual_key)
+ key_path.unlink()
+ except FileNotFoundError:
+ raise KeyError(cache_key)
+
def __setitem__(self, cache_key, value):
full_key = self._convert_cache_key(cache_key)
try:
| diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
index ec9eb97988..08add9b0fd 100644
--- a/tests/unit/test_credentials.py
+++ b/tests/unit/test_credentials.py
@@ -17,7 +17,9 @@
import tempfile
import shutil
+import pytest
from dateutil.tz import tzlocal, tzutc
+from pathlib import Path
from botocore import credentials
from botocore.utils import ContainerMetadataFetcher
@@ -2663,6 +2665,29 @@ def test_can_override_existing_values(self):
self.cache['mykey'] = {'baz': 'newvalue'}
self.assertEqual(self.cache['mykey'], {'baz': 'newvalue'})
+ def test_can_delete_existing_values(self):
+ key_path = Path(os.path.join(self.tempdir, 'deleteme.json'))
+ self.cache['deleteme'] = {'foo': 'bar'}
+ assert self.cache['deleteme'] == {'foo': 'bar'}
+ assert key_path.exists()
+
+ del self.cache['deleteme']
+ # Validate key removed
+ with pytest.raises(KeyError):
+ self.cache['deleteme']
+ # Validate file removed
+ assert not key_path.exists()
+
+ self.cache['deleteme'] = {'bar': 'baz'}
+ assert self.cache['deleteme'] == {'bar': 'baz'}
+
+ def test_can_delete_missing_values(self):
+ key_path = Path(os.path.join(self.tempdir, 'deleteme.json'))
+ assert not key_path.exists()
+
+ with pytest.raises(KeyError):
+ del self.cache['deleteme']
+
def test_can_add_multiple_keys(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey2'] = {'baz': 'qux'}
| diff --git a/.changes/next-release/enhancement-JSONFileCache-35511.json b/.changes/next-release/enhancement-JSONFileCache-35511.json
new file mode 100644
index 0000000000..115af241fa
--- /dev/null
+++ b/.changes/next-release/enhancement-JSONFileCache-35511.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "JSONFileCache",
+ "description": "Add support for __delitem__ in JSONFileCache"
+}
| [
{
"components": [
{
"doc": "",
"lines": [
311,
317
],
"name": "JSONFileCache.__delitem__",
"signature": "def __delitem__(self, cache_key):",
"type": "function"
}
],
"file": "botocore/credentials.py"
}
] | [
"tests/unit/test_credentials.py::TestJSONCache::test_can_delete_existing_values",
"tests/unit/test_credentials.py::TestJSONCache::test_can_delete_missing_values"
] | [
"tests/unit/test_credentials.py::TestCredentials::test_detect_nonascii_character",
"tests/unit/test_credentials.py::TestCredentials::test_unicode_input",
"tests/unit/test_credentials.py::TestRefreshableCredentials::test_detect_nonascii_character",
"tests/unit/test_credentials.py::TestRefreshableCredentials::t... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add __delitem__ functionality to JSONFileCache
This PR should address #2255 by adding the ability to programatically remove keys (and files) from the [JSONFileCache](https://github.com/boto/botocore/blob/develop/botocore/credentials.py#L278).
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in botocore/credentials.py]
(definition of JSONFileCache.__delitem__:)
def __delitem__(self, cache_key):
[end of new definitions in botocore/credentials.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5e4b564dd0f9aab16a404251ebd3e675c9681492 | |
sympy__sympy-22713 | 22,713 | sympy/sympy | 1.11 | d7bddf4f882fe5887638cf3336e7b2f4e989020b | 2021-12-19T18:06:05Z | diff --git a/sympy/algebras/quaternion.py b/sympy/algebras/quaternion.py
index 2786da640873..c05360595809 100644
--- a/sympy/algebras/quaternion.py
+++ b/sympy/algebras/quaternion.py
@@ -3,12 +3,13 @@
from sympy.functions.elementary.complexes import (conjugate, im, re, sign)
from sympy.functions.elementary.exponential import (exp, log as ln)
from sympy.functions.elementary.miscellaneous import sqrt
-from sympy.functions.elementary.trigonometric import (acos, cos, sin)
+from sympy.functions.elementary.trigonometric import (acos, cos, sin, atan2)
from sympy.simplify.trigsimp import trigsimp
from sympy.integrals.integrals import integrate
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.core.sympify import sympify
from sympy.core.expr import Expr
+from sympy.core.logic import fuzzy_not, fuzzy_or
from mpmath.libmp.libmpf import prec_to_dps
@@ -392,6 +393,7 @@ def normalize(self):
q = self
return q * (1/q.norm())
+
def inverse(self):
"""Returns the inverse of the quaternion."""
q = self
@@ -757,3 +759,406 @@ def to_rotation_matrix(self, v=None):
return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13],
[m20, m21, m22, m23], [m30, m31, m32, m33]])
+
+ def scalar_part(self):
+ r"""Returns scalar part($\mathbf{S}(q)$) of the quaternion q.
+
+ Explanation
+ ===========
+
+ Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{S}(q) = a$.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(4, 8, 13, 12)
+ >>> q.scalar_part()
+ 4
+
+ """
+
+ return self.a
+
+ def vector_part(self):
+ r"""
+ Returns vector part($\mathbf{V}(q)$) of the quaternion q.
+
+ Explanation
+ ===========
+
+ Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{V}(q) = bi + cj + dk$.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(1, 1, 1, 1)
+ >>> q.vector_part()
+ 0 + 1*i + 1*j + 1*k
+
+ >>> q = Quaternion(4, 8, 13, 12)
+ >>> q.vector_part()
+ 0 + 8*i + 13*j + 12*k
+
+ """
+
+ return Quaternion(0, self.b, self.c, self.d)
+
+ def axis(self):
+ r"""
+ Returns the axis($\mathbf{Ax}(q)$) of the quaternion.
+
+ Explanation
+ ===========
+
+ Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{Ax}(q)$ i.e., the versor of the vector part of that quaternion
+ equal to $\mathbf{U}[\mathbf{V}(q)]$.
+ The axis is always an imaginary unit with square equal to $-1 + 0i + 0j + 0k$.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(1, 1, 1, 1)
+ >>> q.axis()
+ 0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k
+
+ See Also
+ ========
+
+ vector_part
+
+ """
+ axis = self.vector_part().normalize()
+
+ return Quaternion(0, axis.b, axis.c, axis.d)
+
+ def is_pure(self):
+ """
+ Returns true if the quaternion is pure, false if the quaternion is not pure
+ or returns none if it is unknown.
+
+ Explanation
+ ===========
+
+ A pure quaternion (also a vector quaternion) is a quaternion with scalar
+ part equal to 0.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(0, 8, 13, 12)
+ >>> q.is_pure()
+ True
+
+ See Also
+ ========
+ scalar_part
+
+ """
+
+ return self.a.is_zero
+
+ def is_zero_quaternion(self):
+ """
+ Returns true if the quaternion is a zero quaternion or false if it is not a zero quaternion
+ and None if the value is unknown.
+
+ Explanation
+ ===========
+
+ A zero quaternion is a quaternion with both scalar part and
+ vector part equal to 0.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(1, 0, 0, 0)
+ >>> q.is_zero_quaternion()
+ False
+
+ >>> q = Quaternion(0, 0, 0, 0)
+ >>> q.is_zero_quaternion()
+ True
+
+ See Also
+ ========
+ scalar_part
+ vector_part
+
+ """
+
+ return self.norm().is_zero
+
+ def angle(self):
+ r"""
+ Returns the angle of the quaternion measured in the real-axis plane.
+
+ Explanation
+ ===========
+
+ Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d
+ are real numbers, returns the angle of the quaternion given by
+
+ .. math::
+ angle := atan2(\sqrt{b^2 + c^2 + d^2}, {a})
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(1, 4, 4, 4)
+ >>> q.angle()
+ atan(4*sqrt(3))
+
+ """
+
+ return atan2(self.vector_part().norm(), self.scalar_part())
+
+
+ def arc_coplanar(self, other):
+ """
+ Returns True if the transformation arcs represented by the input quaternions happen in the same plane.
+
+ Explanation
+ ===========
+
+ Two quaternions are said to be coplanar (in this arc sense) when their axes are parallel.
+ The plane of a quaternion is the one normal to its axis.
+
+ Parameters
+ ==========
+
+ other : a Quaternion
+
+ Returns
+ =======
+
+ True : if the planes of the two quaternions are the same, apart from its orientation/sign.
+ False : if the planes of the two quaternions are not the same, apart from its orientation/sign.
+ None : if plane of either of the quaternion is unknown.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q1 = Quaternion(1, 4, 4, 4)
+ >>> q2 = Quaternion(3, 8, 8, 8)
+ >>> Quaternion.arc_coplanar(q1, q2)
+ True
+
+ >>> q1 = Quaternion(2, 8, 13, 12)
+ >>> Quaternion.arc_coplanar(q1, q2)
+ False
+
+ See Also
+ ========
+
+ vector_coplanar
+ is_pure
+
+ """
+ if (self.is_zero_quaternion()) or (other.is_zero_quaternion()):
+ raise ValueError('Neither of the given quaternions can be 0')
+
+ return fuzzy_or([(self.axis() - other.axis()).is_zero_quaternion(), (self.axis() + other.axis()).is_zero_quaternion()])
+
+ @classmethod
+ def vector_coplanar(cls, q1, q2, q3):
+ r"""
+ Returns True if the axis of the pure quaternions seen as 3D vectors
+ q1, q2, and q3 are coplanar.
+
+ Explanation
+ ===========
+
+ Three pure quaternions are vector coplanar if the quaternions seen as 3D vectors are coplanar.
+
+ Parameters
+ ==========
+
+ q1 : a pure Quaternion.
+ q2 : a pure Quaternion.
+ q3 : a pure Quaternion.
+
+ Returns
+ =======
+
+ True : if the axis of the pure quaternions seen as 3D vectors
+ q1, q2, and q3 are coplanar.
+ False : if the axis of the pure quaternions seen as 3D vectors
+ q1, q2, and q3 are not coplanar.
+ None : if the axis of the pure quaternions seen as 3D vectors
+ q1, q2, and q3 are coplanar is unknown.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q1 = Quaternion(0, 4, 4, 4)
+ >>> q2 = Quaternion(0, 8, 8, 8)
+ >>> q3 = Quaternion(0, 24, 24, 24)
+ >>> Quaternion.vector_coplanar(q1, q2, q3)
+ True
+
+ >>> q1 = Quaternion(0, 8, 16, 8)
+ >>> q2 = Quaternion(0, 8, 3, 12)
+ >>> Quaternion.vector_coplanar(q1, q2, q3)
+ False
+
+ See Also
+ ========
+
+ axis
+ is_pure
+
+ """
+
+ if fuzzy_not(q1.is_pure()) or fuzzy_not(q2.is_pure()) or fuzzy_not(q3.is_pure()):
+ raise ValueError('The given quaternions must be pure')
+
+ M = Matrix([[q1.b, q1.c, q1.d], [q2.b, q2.c, q2.d], [q3.b, q3.c, q3.d]]).det()
+ return M.is_zero
+
+ def parallel(self, other):
+ """
+ Returns True if the two pure quaternions seen as 3D vectors are parallel.
+
+ Explanation
+ ===========
+
+ Two pure quaternions are called parallel when their vector product is commutative which
+ implies that the quaternions seen as 3D vectors have same direction.
+
+ Parameters
+ ==========
+
+ other : a Quaternion
+
+ Returns
+ =======
+
+ True : if the two pure quaternions seen as 3D vectors are parallel.
+ False : if the two pure quaternions seen as 3D vectors are not parallel.
+ None : if the two pure quaternions seen as 3D vectors are parallel is unknown.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(0, 4, 4, 4)
+ >>> q1 = Quaternion(0, 8, 8, 8)
+ >>> q.parallel(q1)
+ True
+
+ >>> q1 = Quaternion(0, 8, 13, 12)
+ >>> q.parallel(q1)
+ False
+
+ """
+
+ if fuzzy_not(self.is_pure()) or fuzzy_not(other.is_pure()):
+ raise ValueError('The provided quaternions must be pure')
+
+ return (self*other - other*self).is_zero_quaternion()
+
+ def orthogonal(self, other):
+ """
+ Returns the orthogonality of two quaternions.
+
+ Explanation
+ ===========
+
+ Two pure quaternions are called orthogonal when their product is anti-commutative.
+
+ Parameters
+ ==========
+
+ other : a Quaternion
+
+ Returns
+ =======
+
+ True : if the two pure quaternions seen as 3D vectors are orthogonal.
+ False : if the two pure quaternions seen as 3D vectors are not orthogonal.
+ None : if the two pure quaternions seen as 3D vectors are orthogonal is unknown.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(0, 4, 4, 4)
+ >>> q1 = Quaternion(0, 8, 8, 8)
+ >>> q.orthogonal(q1)
+ False
+
+ >>> q1 = Quaternion(0, 2, 2, 0)
+ >>> q = Quaternion(0, 2, -2, 0)
+ >>> q.orthogonal(q1)
+ True
+
+ """
+
+ if fuzzy_not(self.is_pure()) or fuzzy_not(other.is_pure()):
+ raise ValueError('The given quaternions must be pure')
+
+ return (self*other + other*self).is_zero_quaternion()
+
+ def index_vector(self):
+ r"""
+ Returns the index vector of the quaternion.
+
+ Explanation
+ ===========
+
+ Index vector is given by $\mathbf{T}(q)$ multiplied by $\mathbf{Ax}(q)$ where $\mathbf{Ax}(q)$ is the axis of the quaternion q,
+ and mod(q) is the $\mathbf{T}(q)$ (magnitude) of the quaternion.
+
+ Returns
+ =======
+
+ Quaternion: representing index vector of the provided quaternion.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(2, 4, 2, 4)
+ >>> q.index_vector()
+ 0 + 4*sqrt(10)/3*i + 2*sqrt(10)/3*j + 4*sqrt(10)/3*k
+
+ See Also
+ ========
+
+ axis
+ norm
+
+ """
+
+ return self.norm() * self.axis()
+
+ def mensor(self):
+ """
+ Returns the natural logarithm of the norm(magnitude) of the quaternion.
+
+ Examples
+ ========
+
+ >>> from sympy.algebras.quaternion import Quaternion
+ >>> q = Quaternion(2, 4, 2, 4)
+ >>> q.mensor()
+ log(2*sqrt(10))
+ >>> q.norm()
+ 2*sqrt(10)
+
+ See Also
+ ========
+
+ norm
+
+ """
+
+ return ln(self.norm())
| diff --git a/sympy/algebras/tests/test_quaternion.py b/sympy/algebras/tests/test_quaternion.py
index 5fb6e08efe0f..82ba57eaab5b 100644
--- a/sympy/algebras/tests/test_quaternion.py
+++ b/sympy/algebras/tests/test_quaternion.py
@@ -5,7 +5,7 @@
from sympy.functions.elementary.complexes import (Abs, conjugate, im, re, sign)
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.miscellaneous import sqrt
-from sympy.functions.elementary.trigonometric import (acos, asin, cos, sin)
+from sympy.functions.elementary.trigonometric import (acos, asin, cos, sin, atan2, atan)
from sympy.integrals.integrals import integrate
from sympy.matrices.dense import Matrix
from sympy.simplify.trigsimp import trigsimp
@@ -141,6 +141,60 @@ def test_quaternion_functions():
n = Symbol('n', integer=True)
raises(TypeError, lambda: q1**n)
+ assert Quaternion(22, 23, 55, 8).scalar_part() == 22
+ assert Quaternion(w, x, y, z).scalar_part() == w
+
+ assert Quaternion(22, 23, 55, 8).vector_part() == Quaternion(0, 23, 55, 8)
+ assert Quaternion(w, x, y, z).vector_part() == Quaternion(0, x, y, z)
+
+ assert q1.axis() == Quaternion(0, 2*sqrt(29)/29, 3*sqrt(29)/29, 4*sqrt(29)/29)
+ assert q1.axis().pow(2) == Quaternion(-1, 0, 0, 0)
+ assert q0.axis().scalar_part() == 0
+ assert q.axis() == Quaternion(0, x/sqrt(x**2 + y**2 + z**2), y/sqrt(x**2 + y**2 + z**2), z/sqrt(x**2 + y**2 + z**2))
+
+ assert q0.is_pure() == True
+ assert q1.is_pure() == False
+ assert Quaternion(0, 0, 0, 3).is_pure() == True
+ assert Quaternion(0, 2, 10, 3).is_pure() == True
+ assert Quaternion(w, 2, 10, 3).is_pure() == None
+
+ assert q1.angle() == atan(sqrt(29))
+ assert q.angle() == atan2(sqrt(x**2 + y**2 + z**2), w)
+
+ assert Quaternion.arc_coplanar(q1, Quaternion(2, 4, 6, 8)) == True
+ assert Quaternion.arc_coplanar(q1, Quaternion(1, -2, -3, -4)) == True
+ assert Quaternion.arc_coplanar(q1, Quaternion(1, 8, 12, 16)) == True
+ assert Quaternion.arc_coplanar(q1, Quaternion(1, 2, 3, 4)) == True
+ assert Quaternion.arc_coplanar(q1, Quaternion(w, 4, 6, 8)) == True
+ assert Quaternion.arc_coplanar(q1, Quaternion(2, 7, 4, 1)) == False
+ assert Quaternion.arc_coplanar(q1, Quaternion(w, x, y, z)) == None
+ raises(ValueError, lambda: Quaternion.arc_coplanar(q1, q0))
+
+ assert Quaternion.vector_coplanar(Quaternion(0, 8, 12, 16), Quaternion(0, 4, 6, 8), Quaternion(0, 2, 3, 4)) == True
+ assert Quaternion.vector_coplanar(Quaternion(0, 0, 0, 0), Quaternion(0, 4, 6, 8), Quaternion(0, 2, 3, 4)) == True
+ assert Quaternion.vector_coplanar(Quaternion(0, 8, 2, 6), Quaternion(0, 1, 6, 6), Quaternion(0, 0, 3, 4)) == False
+ assert Quaternion.vector_coplanar(Quaternion(0, 1, 3, 4), Quaternion(0, 4, w, 6), Quaternion(0, 6, 8, 1)) == None
+ raises(ValueError, lambda: Quaternion.vector_coplanar(q0, Quaternion(0, 4, 6, 8), q1))
+
+ assert Quaternion(0, 1, 2, 3).parallel(Quaternion(0, 2, 4, 6)) == True
+ assert Quaternion(0, 1, 2, 3).parallel(Quaternion(0, 2, 2, 6)) == False
+ assert Quaternion(0, 1, 2, 3).parallel(Quaternion(w, x, y, 6)) == None
+ raises(ValueError, lambda: q0.parallel(q1))
+
+ assert Quaternion(0, 1, 2, 3).orthogonal(Quaternion(0, -2, 1, 0)) == True
+ assert Quaternion(0, 2, 4, 7).orthogonal(Quaternion(0, 2, 2, 6)) == False
+ assert Quaternion(0, 2, 4, 7).orthogonal(Quaternion(w, x, y, 6)) == None
+ raises(ValueError, lambda: q0.orthogonal(q1))
+
+ assert q1.index_vector() == Quaternion(0, 2*sqrt(870)/29, 3*sqrt(870)/29, 4*sqrt(870)/29)
+ assert Quaternion(0, 3, 9, 4).index_vector() == Quaternion(0, 3, 9, 4)
+
+ assert Quaternion(4, 3, 9, 4).mensor() == log(sqrt(122))
+ assert Quaternion(3, 3, 0, 2).mensor() == log(sqrt(22))
+
+ assert q0.is_zero_quaternion() == True
+ assert q1.is_zero_quaternion() == False
+ assert Quaternion(w, 0, 0, 0).is_zero_quaternion() == None
def test_quaternion_conversions():
q1 = Quaternion(1, 2, 3, 4)
| [
{
"components": [
{
"doc": "Returns scalar part($\\mathbf{S}(q)$) of the quaternion q.\n\nExplanation\n===========\n\nGiven a quaternion $q = a + bi + cj + dk$, returns $\\mathbf{S}(q) = a$.\n\nExamples\n========\n\n>>> from sympy.algebras.quaternion import Quaternion\n>>> q = Quaternion(4, 8, 13,... | [
"test_quaternion_functions"
] | [
"test_quaternion_construction",
"test_quaternion_axis_angle",
"test_quaternion_axis_angle_simplification",
"test_quaternion_complex_real_addition",
"test_quaternion_evalf",
"test_quaternion_conversions",
"test_quaternion_rotation_iss1593",
"test_quaternion_multiplication"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Quaternion - added Hamilton's "operators" for Quaternion class
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
#### Brief description of what is fixed or changed
This is a partial fix for #20769 and also part of this PR is taken from unmerged PR #20853 and made changes in code and tests as requested by @danilobellini
#### Other comments
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
* algebras
* Added hamilton's operators in Quaternion class in quaternion.py.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/algebras/quaternion.py]
(definition of Quaternion.scalar_part:)
def scalar_part(self):
"""Returns scalar part($\mathbf{S}(q)$) of the quaternion q.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{S}(q) = a$.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(4, 8, 13, 12)
>>> q.scalar_part()
4"""
(definition of Quaternion.vector_part:)
def vector_part(self):
"""Returns vector part($\mathbf{V}(q)$) of the quaternion q.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{V}(q) = bi + cj + dk$.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 1, 1, 1)
>>> q.vector_part()
0 + 1*i + 1*j + 1*k
>>> q = Quaternion(4, 8, 13, 12)
>>> q.vector_part()
0 + 8*i + 13*j + 12*k"""
(definition of Quaternion.axis:)
def axis(self):
"""Returns the axis($\mathbf{Ax}(q)$) of the quaternion.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$, returns $\mathbf{Ax}(q)$ i.e., the versor of the vector part of that quaternion
equal to $\mathbf{U}[\mathbf{V}(q)]$.
The axis is always an imaginary unit with square equal to $-1 + 0i + 0j + 0k$.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 1, 1, 1)
>>> q.axis()
0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k
See Also
========
vector_part"""
(definition of Quaternion.is_pure:)
def is_pure(self):
"""Returns true if the quaternion is pure, false if the quaternion is not pure
or returns none if it is unknown.
Explanation
===========
A pure quaternion (also a vector quaternion) is a quaternion with scalar
part equal to 0.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(0, 8, 13, 12)
>>> q.is_pure()
True
See Also
========
scalar_part"""
(definition of Quaternion.is_zero_quaternion:)
def is_zero_quaternion(self):
"""Returns true if the quaternion is a zero quaternion or false if it is not a zero quaternion
and None if the value is unknown.
Explanation
===========
A zero quaternion is a quaternion with both scalar part and
vector part equal to 0.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 0, 0, 0)
>>> q.is_zero_quaternion()
False
>>> q = Quaternion(0, 0, 0, 0)
>>> q.is_zero_quaternion()
True
See Also
========
scalar_part
vector_part"""
(definition of Quaternion.angle:)
def angle(self):
"""Returns the angle of the quaternion measured in the real-axis plane.
Explanation
===========
Given a quaternion $q = a + bi + cj + dk$ where a, b, c and d
are real numbers, returns the angle of the quaternion given by
.. math::
angle := atan2(\sqrt{b^2 + c^2 + d^2}, {a})
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(1, 4, 4, 4)
>>> q.angle()
atan(4*sqrt(3))"""
(definition of Quaternion.arc_coplanar:)
def arc_coplanar(self, other):
"""Returns True if the transformation arcs represented by the input quaternions happen in the same plane.
Explanation
===========
Two quaternions are said to be coplanar (in this arc sense) when their axes are parallel.
The plane of a quaternion is the one normal to its axis.
Parameters
==========
other : a Quaternion
Returns
=======
True : if the planes of the two quaternions are the same, apart from its orientation/sign.
False : if the planes of the two quaternions are not the same, apart from its orientation/sign.
None : if plane of either of the quaternion is unknown.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q1 = Quaternion(1, 4, 4, 4)
>>> q2 = Quaternion(3, 8, 8, 8)
>>> Quaternion.arc_coplanar(q1, q2)
True
>>> q1 = Quaternion(2, 8, 13, 12)
>>> Quaternion.arc_coplanar(q1, q2)
False
See Also
========
vector_coplanar
is_pure"""
(definition of Quaternion.vector_coplanar:)
def vector_coplanar(cls, q1, q2, q3):
"""Returns True if the axis of the pure quaternions seen as 3D vectors
q1, q2, and q3 are coplanar.
Explanation
===========
Three pure quaternions are vector coplanar if the quaternions seen as 3D vectors are coplanar.
Parameters
==========
q1 : a pure Quaternion.
q2 : a pure Quaternion.
q3 : a pure Quaternion.
Returns
=======
True : if the axis of the pure quaternions seen as 3D vectors
q1, q2, and q3 are coplanar.
False : if the axis of the pure quaternions seen as 3D vectors
q1, q2, and q3 are not coplanar.
None : if the axis of the pure quaternions seen as 3D vectors
q1, q2, and q3 are coplanar is unknown.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q1 = Quaternion(0, 4, 4, 4)
>>> q2 = Quaternion(0, 8, 8, 8)
>>> q3 = Quaternion(0, 24, 24, 24)
>>> Quaternion.vector_coplanar(q1, q2, q3)
True
>>> q1 = Quaternion(0, 8, 16, 8)
>>> q2 = Quaternion(0, 8, 3, 12)
>>> Quaternion.vector_coplanar(q1, q2, q3)
False
See Also
========
axis
is_pure"""
(definition of Quaternion.parallel:)
def parallel(self, other):
"""Returns True if the two pure quaternions seen as 3D vectors are parallel.
Explanation
===========
Two pure quaternions are called parallel when their vector product is commutative which
implies that the quaternions seen as 3D vectors have same direction.
Parameters
==========
other : a Quaternion
Returns
=======
True : if the two pure quaternions seen as 3D vectors are parallel.
False : if the two pure quaternions seen as 3D vectors are not parallel.
None : if the two pure quaternions seen as 3D vectors are parallel is unknown.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(0, 4, 4, 4)
>>> q1 = Quaternion(0, 8, 8, 8)
>>> q.parallel(q1)
True
>>> q1 = Quaternion(0, 8, 13, 12)
>>> q.parallel(q1)
False"""
(definition of Quaternion.orthogonal:)
def orthogonal(self, other):
"""Returns the orthogonality of two quaternions.
Explanation
===========
Two pure quaternions are called orthogonal when their product is anti-commutative.
Parameters
==========
other : a Quaternion
Returns
=======
True : if the two pure quaternions seen as 3D vectors are orthogonal.
False : if the two pure quaternions seen as 3D vectors are not orthogonal.
None : if the two pure quaternions seen as 3D vectors are orthogonal is unknown.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(0, 4, 4, 4)
>>> q1 = Quaternion(0, 8, 8, 8)
>>> q.orthogonal(q1)
False
>>> q1 = Quaternion(0, 2, 2, 0)
>>> q = Quaternion(0, 2, -2, 0)
>>> q.orthogonal(q1)
True"""
(definition of Quaternion.index_vector:)
def index_vector(self):
"""Returns the index vector of the quaternion.
Explanation
===========
Index vector is given by $\mathbf{T}(q)$ multiplied by $\mathbf{Ax}(q)$ where $\mathbf{Ax}(q)$ is the axis of the quaternion q,
and mod(q) is the $\mathbf{T}(q)$ (magnitude) of the quaternion.
Returns
=======
Quaternion: representing index vector of the provided quaternion.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(2, 4, 2, 4)
>>> q.index_vector()
0 + 4*sqrt(10)/3*i + 2*sqrt(10)/3*j + 4*sqrt(10)/3*k
See Also
========
axis
norm"""
(definition of Quaternion.mensor:)
def mensor(self):
"""Returns the natural logarithm of the norm(magnitude) of the quaternion.
Examples
========
>>> from sympy.algebras.quaternion import Quaternion
>>> q = Quaternion(2, 4, 2, 4)
>>> q.mensor()
log(2*sqrt(10))
>>> q.norm()
2*sqrt(10)
See Also
========
norm"""
[end of new definitions in sympy/algebras/quaternion.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | edf24253833ca153cb6d29ae54092ecebe29614c | ||
sympy__sympy-22705 | 22,705 | sympy/sympy | 1.10 | 3ff4717b6aef6086e78f01cdfa06f64ae23aed7e | 2021-12-18T21:04:49Z | diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py
index b2c7850be457..a8a69cd53623 100644
--- a/sympy/printing/latex.py
+++ b/sympy/printing/latex.py
@@ -2254,6 +2254,10 @@ def _print_ConditionSet(self, s):
self._print(s.base_set),
self._print(s.condition))
+ def _print_PowerSet(self, expr):
+ arg_print = self._print(expr.args[0])
+ return r"\mathcal{{P}}\left({}\right)".format(arg_print)
+
def _print_ComplexRegion(self, s):
vars_print = ', '.join([self._print(var) for var in s.variables])
return r"\left\{%s\; \middle|\; %s \in %s \right\}" % (
@@ -2385,6 +2389,25 @@ def _print_RootSum(self, expr):
return r"\operatorname{%s} {\left(%s\right)}" % (cls,
", ".join(args))
+ def _print_OrdinalOmega(self, expr):
+ return r"\omega"
+
+ def _print_OmegaPower(self, expr):
+ exp, mul = expr.args
+ if mul != 1:
+ if exp != 1:
+ return r"{} \omega^{{{}}}".format(mul, exp)
+ else:
+ return r"{} \omega".format(mul)
+ else:
+ if exp != 1:
+ return r"\omega^{{{}}}".format(exp)
+ else:
+ return r"\omega"
+
+ def _print_Ordinal(self, expr):
+ return " + ".join([self._print(arg) for arg in expr.args])
+
def _print_PolyElement(self, poly):
mul_symbol = self._settings['mul_symbol_latex']
return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol)
| diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py
index 8934fe5c3049..99eb1c68b75e 100644
--- a/sympy/printing/tests/test_latex.py
+++ b/sympy/printing/tests/test_latex.py
@@ -61,6 +61,8 @@
from sympy.sets.conditionset import ConditionSet
from sympy.sets.contains import Contains
from sympy.sets.fancysets import (ComplexRegion, ImageSet, Range)
+from sympy.sets.ordinals import Ordinal, OrdinalOmega, OmegaPower
+from sympy.sets.powerset import PowerSet
from sympy.sets.sets import (FiniteSet, Interval, Union, Intersection, Complement, SymmetricDifference, ProductSet)
from sympy.sets.setexpr import SetExpr
from sympy.stats.crv_types import Normal
@@ -1023,6 +1025,20 @@ def test_latex_productset():
latex(line), latex(bigline), latex(fset))
+def test_latex_powerset():
+ fset = FiniteSet(1, 2, 3)
+ assert latex(PowerSet(fset)) == r'\mathcal{P}\left(\left\{1, 2, 3\right\}\right)'
+
+
+def test_latex_ordinals():
+ w = OrdinalOmega()
+ assert latex(w) == r"\omega"
+ wp = OmegaPower(2, 3)
+ assert latex(wp) == r'3 \omega^{2}'
+ assert latex(Ordinal(wp, OmegaPower(1, 1))) == r'3 \omega^{2} + \omega'
+ assert latex(Ordinal(OmegaPower(2, 1), OmegaPower(1, 2))) == r'\omega^{2} + 2 \omega'
+
+
def test_set_operators_parenthesis():
a, b, c, d = symbols('a:d')
A = FiniteSet(a)
| [
{
"components": [
{
"doc": "",
"lines": [
2257,
2259
],
"name": "LatexPrinter._print_PowerSet",
"signature": "def _print_PowerSet(self, expr):",
"type": "function"
},
{
"doc": "",
"lines": [
2392,
... | [
"test_latex_powerset",
"test_latex_ordinals"
] | [
"test_printmethod",
"test_latex_basic",
"test_latex_builtins",
"test_latex_SingularityFunction",
"test_latex_cycle",
"test_latex_permutation",
"test_latex_Float",
"test_latex_vector_expressions",
"test_latex_symbols",
"test_latex_functions",
"test_function_subclass_different_name",
"test_hyper... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added LaTeX printing for PowerSet and ordinals
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
Related to
Builds on #22661 #22663 #22665 so should be merged in order.
#### Brief description of what is fixed or changed
LaTeX printing for PowerSet and ordinals


#### Other comments
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
* printing
* LaTeX printing for `PowerSet` and ordinals.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/printing/latex.py]
(definition of LatexPrinter._print_PowerSet:)
def _print_PowerSet(self, expr):
(definition of LatexPrinter._print_OrdinalOmega:)
def _print_OrdinalOmega(self, expr):
(definition of LatexPrinter._print_OmegaPower:)
def _print_OmegaPower(self, expr):
(definition of LatexPrinter._print_Ordinal:)
def _print_Ordinal(self, expr):
[end of new definitions in sympy/printing/latex.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3e8695add7a25c8d70aeba7d6137496df02863fd | ||
matplotlib__matplotlib-21977 | 21,977 | matplotlib/matplotlib | 3.5 | ac2a14581949bcf869e969f28a21ee479d2fc250 | 2021-12-16T13:31:39Z | diff --git a/lib/matplotlib/patches.py b/lib/matplotlib/patches.py
index b83714bdcd7b..544a626b8abe 100644
--- a/lib/matplotlib/patches.py
+++ b/lib/matplotlib/patches.py
@@ -806,6 +806,18 @@ def get_xy(self):
"""Return the left and bottom coords of the rectangle as a tuple."""
return self._x0, self._y0
+ def get_corners(self):
+ """
+ Return the corners of the rectangle, moving anti-clockwise from
+ (x0, y0).
+ """
+ return self.get_patch_transform().transform(
+ [(0, 0), (1, 0), (1, 1), (0, 1)])
+
+ def get_center(self):
+ """Return the centre of the rectangle."""
+ return self.get_patch_transform().transform((0.5, 0.5))
+
def get_width(self):
"""Return the width of the rectangle."""
return self._width
@@ -1657,6 +1669,16 @@ def get_angle(self):
angle = property(get_angle, set_angle)
+ def get_corners(self):
+ """
+ Return the corners of the ellipse bounding box.
+
+ The bounding box orientation is moving anti-clockwise from the
+ lower left corner defined before rotation.
+ """
+ return self.get_patch_transform().transform(
+ [(-1, -1), (1, -1), (1, 1), (-1, 1)])
+
class Annulus(Patch):
"""
| diff --git a/lib/matplotlib/tests/test_patches.py b/lib/matplotlib/tests/test_patches.py
index 9487758c8aef..6a8ddc87f3ae 100644
--- a/lib/matplotlib/tests/test_patches.py
+++ b/lib/matplotlib/tests/test_patches.py
@@ -6,7 +6,7 @@
import pytest
import matplotlib as mpl
-from matplotlib.patches import (Annulus, Patch, Polygon, Rectangle,
+from matplotlib.patches import (Annulus, Ellipse, Patch, Polygon, Rectangle,
FancyArrowPatch)
from matplotlib.testing.decorators import image_comparison, check_figures_equal
from matplotlib.transforms import Bbox
@@ -54,6 +54,54 @@ def test_Polygon_close():
assert_array_equal(p.get_xy(), xyclosed)
+def test_corner_center():
+ loc = [10, 20]
+ width = 1
+ height = 2
+
+ # Rectangle
+ # No rotation
+ corners = ((10, 20), (11, 20), (11, 22), (10, 22))
+ rect = Rectangle(loc, width, height)
+ assert_array_equal(rect.get_corners(), corners)
+ assert_array_equal(rect.get_center(), (10.5, 21))
+
+ # 90 deg rotation
+ corners_rot = ((10, 20), (10, 21), (8, 21), (8, 20))
+ rect.set_angle(90)
+ assert_array_equal(rect.get_corners(), corners_rot)
+ assert_array_equal(rect.get_center(), (9, 20.5))
+
+ # Rotation not a multiple of 90 deg
+ theta = 33
+ t = mtransforms.Affine2D().rotate_around(*loc, np.deg2rad(theta))
+ corners_rot = t.transform(corners)
+ rect.set_angle(theta)
+ assert_almost_equal(rect.get_corners(), corners_rot)
+
+ # Ellipse
+ loc = [loc[0] + width / 2,
+ loc[1] + height / 2]
+ ellipse = Ellipse(loc, width, height)
+
+ # No rotation
+ assert_array_equal(ellipse.get_corners(), corners)
+
+ # 90 deg rotation
+ corners_rot = ((11.5, 20.5), (11.5, 21.5), (9.5, 21.5), (9.5, 20.5))
+ ellipse.set_angle(90)
+ assert_array_equal(ellipse.get_corners(), corners_rot)
+ # Rotation shouldn't change ellipse center
+ assert_array_equal(ellipse.get_center(), loc)
+
+ # Rotation not a multiple of 90 deg
+ theta = 33
+ t = mtransforms.Affine2D().rotate_around(*loc, np.deg2rad(theta))
+ corners_rot = t.transform(corners)
+ ellipse.set_angle(theta)
+ assert_almost_equal(ellipse.get_corners(), corners_rot)
+
+
def test_rotate_rect():
loc = np.asarray([1.0, 2.0])
width = 2
| [
{
"components": [
{
"doc": "Return the corners of the rectangle, moving anti-clockwise from\n(x0, y0).",
"lines": [
805,
811
],
"name": "Rectangle.get_corners",
"signature": "def get_corners(self):",
"type": "function"
},
{
... | [
"lib/matplotlib/tests/test_patches.py::test_corner_center"
] | [
"lib/matplotlib/tests/test_patches.py::test_Polygon_close",
"lib/matplotlib/tests/test_patches.py::test_rotate_rect",
"lib/matplotlib/tests/test_patches.py::test_rotate_rect_draw[png]",
"lib/matplotlib/tests/test_patches.py::test_negative_rect",
"lib/matplotlib/tests/test_patches.py::test_clip_to_bbox[png]"... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add corner coordinate helper methods to Ellipse/Rectangle
## PR Summary
This is pulled out of https://github.com/matplotlib/matplotlib/pull/21945 because it's a standalone feature, and I think it's worth the scrutiny of a separate PR.
## PR Checklist
<!-- Please mark any checkboxes that do not apply to this PR as [N/A]. -->
**Tests and Styling**
- [x] Has pytest style unit tests (and `pytest` passes).
- [x] Is [Flake 8](https://flake8.pycqa.org/en/latest/) compliant (install `flake8-docstrings` and run `flake8 --docstring-convention=all`).
**Documentation**
- [ ] New features are documented, with examples if plot related.
- [ ] New features have an entry in `doc/users/next_whats_new/` (follow instructions in README.rst there).
- [ ] API changes documented in `doc/api/next_api_changes/` (follow instructions in README.rst there).
- [ ] Documentation is sphinx and numpydoc compliant (the docs should [build](https://matplotlib.org/devel/documenting_mpl.html#building-the-docs) without error).
<!--
Thank you so much for your PR! To help us review your contribution, please
consider the following points:
- A development guide is available at https://matplotlib.org/devdocs/devel/index.html.
- Help with git and github is available at
https://matplotlib.org/devel/gitwash/development_workflow.html.
- Do not create the PR out of main, but out of a separate branch.
- The PR title should summarize the changes, for example "Raise ValueError on
non-numeric input to set_xlim". Avoid non-descriptive titles such as
"Addresses issue #8576".
- The summary should provide at least 1-2 sentences describing the pull request
in detail (Why is this change required? What problem does it solve?) and
link to any relevant issues.
- If you are contributing fixes to docstrings, please pay attention to
http://matplotlib.org/devel/documenting_mpl.html#formatting. In particular,
note the difference between using single backquotes, double backquotes, and
asterisks in the markup.
We understand that PRs can sometimes be overwhelming, especially as the
reviews start coming in. Please let us know if the reviews are unclear or
the recommended next step seems overly demanding, if you would like help in
addressing a reviewer's comments, or if you have been waiting too long to hear
back on your PR.
-->
----------
Sorry for the messy PR... I've dropped edge_centers (I'll add them as private API in the widget PR), and should have resolved all the other comments.
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in lib/matplotlib/patches.py]
(definition of Rectangle.get_corners:)
def get_corners(self):
"""Return the corners of the rectangle, moving anti-clockwise from
(x0, y0)."""
(definition of Rectangle.get_center:)
def get_center(self):
"""Return the centre of the rectangle."""
(definition of Ellipse.get_corners:)
def get_corners(self):
"""Return the corners of the ellipse bounding box.
The bounding box orientation is moving anti-clockwise from the
lower left corner defined before rotation."""
[end of new definitions in lib/matplotlib/patches.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3d6c3da884fafae4654df68144391cfe9be6f134 | ||
Textualize__rich-1759 | 1,759 | Textualize/rich | null | 9f43cccfce3b39b8ac637b4e8cdfe2f0946e9c23 | 2021-12-15T16:18:18Z | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 596bd99779..d3967faec4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased]
+## 12.1.0
+
+### Added
+
+- Progress.open and Progress.wrap_file method to track the progress while reading from a file or file-like object https://github.com/willmcgugan/rich/pull/1759
### Added
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 3dc31bf805..aca64423dc 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -18,6 +18,7 @@ The following people have contributed to the development of Rich:
- [Finn Hughes](https://github.com/finnhughes)
- [Josh Karpel](https://github.com/JoshKarpel)
- [Andrew Kettmann](https://github.com/akettmann)
+- [Martin Larralde](https://github.com/althonos)
- [Hedy Li](https://github.com/hedythedev)
- [Luka Mamukashvili](https://github.com/UltraStudioLTD)
- [Alexander Mancevice](https://github.com/amancevice)
diff --git a/docs/source/progress.rst b/docs/source/progress.rst
index 4ce3588368..f905b255e5 100644
--- a/docs/source/progress.rst
+++ b/docs/source/progress.rst
@@ -26,6 +26,16 @@ For basic usage call the :func:`~rich.progress.track` function, which accepts a
for n in track(range(n), description="Processing..."):
do_work(n)
+
+To get a progress bar while reading from a file, you may consider using the :func:`~rich.progress.read` function, which accepts a path, or a *file-like* object. It will return a *file-like* object in *binary mode* that will update the progress information as it's being read from. Here's an example, tracking the progresses made by :func:`json.load` to load a file::
+
+ import json
+ from rich.progress import read
+
+ with read("data.json", description="Loading data...") as f:
+ data = json.load(f)
+
+
Advanced usage
--------------
@@ -34,9 +44,9 @@ If you require multiple tasks in the display, or wish to configure the columns i
The Progress class is designed to be used as a *context manager* which will start and stop the progress display automatically.
Here's a simple example::
-
+
import time
-
+
from rich.progress import Progress
with Progress() as progress:
@@ -179,7 +189,7 @@ If you have another Console object you want to use, pass it in to the :class:`~r
with Progress(console=my_console) as progress:
my_console.print("[bold blue]Starting work!")
do_work(progress)
-
+
Redirecting stdout / stderr
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -199,6 +209,42 @@ If the :class:`~rich.progress.Progress` class doesn't offer exactly what you nee
def get_renderables(self):
yield Panel(self.make_tasks_table(self.tasks))
+Reading from a file
+~~~~~~~~~~~~~~~~~~~
+
+You can obtain a progress-tracking reader using the :meth:`~rich.progress.Progress.open` method by giving it a path. You can specify the number of bytes to be read, but by default :meth:`~rich.progress.Progress.open` will query the size of the file with :func:`os.stat`. You are responsible for closing the file, and you should consider using a *context* to make sure it is closed ::
+
+ import json
+ from rich.progress import Progress
+
+ with Progress() as progress:
+ with progress.open("data.json", "rb") as file:
+ json.load(file)
+
+
+Note that in the above snippet we use the `"rb"` mode, because we needed the file to be opened in binary mode to pass it to :func:`json.load`. If the API consuming the file is expecting an object in *text mode* (for instance, :func:`csv.reader`), you can open the file with the `"r"` mode, which happens to be the default ::
+
+ from rich.progress import Progress
+
+ with Progress() as progress:
+ with progress.open("README.md") as file:
+ for line in file:
+ print(line)
+
+
+Reading from a file-like object
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can obtain a progress-tracking reader wrapping a file-like object using the :meth:`~rich.progress.Progress.wrap_file` method. The file-like object must be in *binary mode*, and a total must be provided, unless it was provided to a :class:`~rich.progress.Task` created beforehand. The returned reader may be used in a context, but will not take care of closing the wrapped file ::
+
+ import json
+ from rich.progress import Progress
+
+ with Progress() as progress:
+ with open("data.json", "rb") as file:
+ json.load(progress.wrap_file(file, total=2048))
+
+
Multiple Progress
-----------------
@@ -208,4 +254,3 @@ Example
-------
See `downloader.py <https://github.com/willmcgugan/rich/blob/master/examples/downloader.py>`_ for a realistic application of a progress display. This script can download multiple concurrent files with a progress bar, transfer speed and file size.
-
diff --git a/examples/cp_progress.py b/examples/cp_progress.py
new file mode 100644
index 0000000000..0f4059d024
--- /dev/null
+++ b/examples/cp_progress.py
@@ -0,0 +1,39 @@
+"""
+A very minimal `cp` clone that displays a progress bar.
+"""
+import os
+import shutil
+import sys
+
+from rich.progress import (
+ BarColumn,
+ DownloadColumn,
+ Progress,
+ TaskID,
+ TextColumn,
+ TimeRemainingColumn,
+ TransferSpeedColumn,
+)
+
+progress = Progress(
+ TextColumn("[bold blue]{task.description}", justify="right"),
+ BarColumn(bar_width=None),
+ "[progress.percentage]{task.percentage:>3.1f}%",
+ "•",
+ DownloadColumn(),
+ "•",
+ TransferSpeedColumn(),
+ "•",
+ TimeRemainingColumn(),
+)
+
+if __name__ == "__main__":
+ if len(sys.argv) == 3:
+
+ with progress:
+ desc = os.path.basename(sys.argv[1])
+ with progress.read(sys.argv[1], description=desc) as src:
+ with open(sys.argv[2], "wb") as dst:
+ shutil.copyfileobj(src, dst)
+ else:
+ print("Usage:\n\tpython cp_progress.py SRC DST")
diff --git a/rich/progress.py b/rich/progress.py
index e4abbdb66a..a3b30a85bc 100644
--- a/rich/progress.py
+++ b/rich/progress.py
@@ -1,28 +1,44 @@
+import io
+import sys
+import typing
+import warnings
from abc import ABC, abstractmethod
from collections import deque
from collections.abc import Sized
from dataclasses import dataclass, field
from datetime import timedelta
+from io import RawIOBase, UnsupportedOperation
from math import ceil
+from mmap import mmap
+from os import PathLike, stat
from threading import Event, RLock, Thread
from types import TracebackType
from typing import (
Any,
+ BinaryIO,
Callable,
+ ContextManager,
Deque,
Dict,
+ Generic,
Iterable,
List,
NamedTuple,
NewType,
Optional,
Sequence,
+ TextIO,
Tuple,
Type,
TypeVar,
Union,
)
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from typing_extensions import Literal # pragma: no cover
+
from . import filesize, get_console
from .console import Console, JustifyMethod, RenderableType, Group
from .highlighter import Highlighter
@@ -41,6 +57,9 @@
GetTimeCallable = Callable[[], float]
+_I = typing.TypeVar("_I", TextIO, BinaryIO)
+
+
class _TrackThread(Thread):
"""A thread to periodically update progress."""
@@ -149,6 +168,320 @@ def track(
)
+class _Reader(RawIOBase, BinaryIO):
+ """A reader that tracks progress while it's being read from."""
+
+ def __init__(
+ self,
+ handle: BinaryIO,
+ progress: "Progress",
+ task: TaskID,
+ close_handle: bool = True,
+ ) -> None:
+ self.handle = handle
+ self.progress = progress
+ self.task = task
+ self.close_handle = close_handle
+ self._closed = False
+
+ def __enter__(self) -> "_Reader":
+ self.handle.__enter__()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.close()
+
+ def __iter__(self) -> BinaryIO:
+ return self
+
+ def __next__(self) -> bytes:
+ line = next(self.handle)
+ self.progress.advance(self.task, advance=len(line))
+ return line
+
+ @property
+ def closed(self) -> bool:
+ return self._closed
+
+ def fileno(self) -> int:
+ return self.handle.fileno()
+
+ def isatty(self) -> bool:
+ return self.handle.isatty()
+
+ def readable(self) -> bool:
+ return self.handle.readable()
+
+ def seekable(self) -> bool:
+ return self.handle.seekable()
+
+ def writable(self) -> bool:
+ return False
+
+ def read(self, size: int = -1) -> bytes:
+ block = self.handle.read(size)
+ self.progress.advance(self.task, advance=len(block))
+ return block
+
+ def readinto(self, b: Union[bytearray, memoryview, mmap]): # type: ignore[no-untyped-def, override]
+ n = self.handle.readinto(b) # type: ignore[attr-defined]
+ self.progress.advance(self.task, advance=n)
+ return n
+
+ def readline(self, size: int = -1) -> bytes: # type: ignore[override]
+ line = self.handle.readline(size)
+ self.progress.advance(self.task, advance=len(line))
+ return line
+
+ def readlines(self, hint: int = -1) -> List[bytes]:
+ lines = self.handle.readlines(hint)
+ self.progress.advance(self.task, advance=sum(map(len, lines)))
+ return lines
+
+ def close(self) -> None:
+ if self.close_handle:
+ self.handle.close()
+ self._closed = True
+
+ def seek(self, offset: int, whence: int = 0) -> int:
+ pos = self.handle.seek(offset, whence)
+ self.progress.update(self.task, completed=pos)
+ return pos
+
+ def tell(self) -> int:
+ return self.handle.tell()
+
+ def write(self, s: Any) -> int:
+ raise UnsupportedOperation("write")
+
+
+class _ReadContext(ContextManager[_I], Generic[_I]):
+ """A utility class to handle a context for both a reader and a progress."""
+
+ def __init__(self, progress: "Progress", reader: _I) -> None:
+ self.progress = progress
+ self.reader: _I = reader
+
+ def __enter__(self) -> _I:
+ self.progress.start()
+ return self.reader.__enter__()
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.progress.stop()
+ self.reader.__exit__(exc_type, exc_val, exc_tb)
+
+
+def wrap_file(
+ file: BinaryIO,
+ total: int,
+ *,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> ContextManager[BinaryIO]:
+ """Read bytes from a file while tracking progress.
+
+ Args:
+ file (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode.
+ total (int): Total number of bytes to read.
+ description (str, optional): Description of task show next to progress bar. Defaults to "Reading".
+ auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+ transient: (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ disable (bool, optional): Disable display of progress.
+ Returns:
+ ContextManager[BinaryIO]: A context manager yielding a progress reader.
+
+ """
+
+ columns: List["ProgressColumn"] = (
+ [TextColumn("[progress.description]{task.description}")] if description else []
+ )
+ columns.extend(
+ (
+ BarColumn(
+ style=style,
+ complete_style=complete_style,
+ finished_style=finished_style,
+ pulse_style=pulse_style,
+ ),
+ DownloadColumn(),
+ TimeRemainingColumn(),
+ )
+ )
+ progress = Progress(
+ *columns,
+ auto_refresh=auto_refresh,
+ console=console,
+ transient=transient,
+ get_time=get_time,
+ refresh_per_second=refresh_per_second or 10,
+ disable=disable,
+ )
+
+ reader = progress.wrap_file(file, total=total, description=description)
+ return _ReadContext(progress, reader)
+
+
+@typing.overload
+def open(
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["rt"], Literal["r"]],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> ContextManager[TextIO]:
+ pass
+
+
+@typing.overload
+def open(
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Literal["rb"],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> ContextManager[BinaryIO]:
+ pass
+
+
+def open(
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]:
+ """Read bytes from a file while tracking progress.
+
+ Args:
+ path (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode.
+ mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+ buffering (int): The buffering strategy to use, see :func:`io.open`.
+ encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+ errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+ newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`
+ total: (int, optional): Total number of bytes to read. Must be provided if reading from a file handle. Default for a path is os.stat(file).st_size.
+ description (str, optional): Description of task show next to progress bar. Defaults to "Reading".
+ auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+ transient: (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ disable (bool, optional): Disable display of progress.
+ encoding (str, optional): The encoding to use when reading in text mode.
+
+ Returns:
+ ContextManager[BinaryIO]: A context manager yielding a progress reader.
+
+ """
+
+ columns: List["ProgressColumn"] = (
+ [TextColumn("[progress.description]{task.description}")] if description else []
+ )
+ columns.extend(
+ (
+ BarColumn(
+ style=style,
+ complete_style=complete_style,
+ finished_style=finished_style,
+ pulse_style=pulse_style,
+ ),
+ DownloadColumn(),
+ TimeRemainingColumn(),
+ )
+ )
+ progress = Progress(
+ *columns,
+ auto_refresh=auto_refresh,
+ console=console,
+ transient=transient,
+ get_time=get_time,
+ refresh_per_second=refresh_per_second or 10,
+ disable=disable,
+ )
+
+ reader = progress.open(
+ file,
+ mode=mode,
+ buffering=buffering,
+ encoding=encoding,
+ errors=errors,
+ newline=newline,
+ total=total,
+ description=description,
+ )
+ return _ReadContext(progress, reader) # type: ignore[return-value, type-var]
+
+
class ProgressColumn(ABC):
"""Base class for a widget to use in progress display."""
@@ -794,6 +1127,157 @@ def track(
advance(task_id, 1)
refresh()
+ def wrap_file(
+ self,
+ file: BinaryIO,
+ total: Optional[int] = None,
+ *,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> BinaryIO:
+ """Track progress file reading from a binary file.
+
+ Args:
+ file (BinaryIO): A file-like object opened in binary mode.
+ total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
+ task_id (TaskID): Task to track. Default is new task.
+ description (str, optional): Description of task, if new task is created.
+
+ Returns:
+ BinaryIO: A readable file-like object in binary mode.
+
+ Raises:
+ ValueError: When no total value can be extracted from the arguments or the task.
+ """
+ # attempt to recover the total from the task
+ total_bytes: Optional[float] = None
+ if total is not None:
+ total_bytes = total
+ elif task_id is not None:
+ with self._lock:
+ total_bytes = self._tasks[task_id].total
+ if total_bytes is None:
+ raise ValueError(
+ f"unable to get the total number of bytes, please specify 'total'"
+ )
+
+ # update total of task or create new task
+ if task_id is None:
+ task_id = self.add_task(description, total=total_bytes)
+ else:
+ self.update(task_id, total=total_bytes)
+
+ return _Reader(file, self, task_id, close_handle=False)
+
+ @typing.overload
+ def open(
+ self,
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Literal["rb"],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> BinaryIO:
+ pass
+
+ @typing.overload
+ def open(
+ self,
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["r"], Literal["rt"]],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> TextIO:
+ pass
+
+ def open(
+ self,
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> Union[BinaryIO, TextIO]:
+ """Track progress while reading from a binary file.
+
+ Args:
+ path (Union[str, PathLike[str]]): The path to the file to read.
+ mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+ buffering (int): The buffering strategy to use, see :func:`io.open`.
+ encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+ errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+ newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
+ total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
+ task_id (TaskID): Task to track. Default is new task.
+ description (str, optional): Description of task, if new task is created.
+
+ Returns:
+ BinaryIO: A readable file-like object in binary mode.
+
+ Raises:
+ ValueError: When an invalid mode is given.
+ """
+ # normalize the mode (always rb, rt)
+ _mode = "".join(sorted(mode, reverse=False))
+ if _mode not in ("br", "rt", "r"):
+ raise ValueError("invalid mode {!r}".format(mode))
+
+ # patch buffering to provide the same behaviour as the builtin `open`
+ line_buffering = buffering == 1
+ if _mode == "br" and buffering == 1:
+ warnings.warn(
+ "line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
+ RuntimeWarning,
+ )
+ buffering = -1
+ elif _mode == "rt" or _mode == "r":
+ if buffering == 0:
+ raise ValueError("can't have unbuffered text I/O")
+ elif buffering == 1:
+ buffering = -1
+
+ # attempt to get the total with `os.stat`
+ if total is None:
+ total = stat(file).st_size
+
+ # update total of task or create new task
+ if task_id is None:
+ task_id = self.add_task(description, total=total)
+ else:
+ self.update(task_id, total=total)
+
+ # open the file in binary mode,
+ handle = io.open(file, "rb", buffering=buffering)
+ reader = _Reader(handle, self, task_id, close_handle=True)
+
+ # wrap the reader in a `TextIOWrapper` if text mode
+ if mode == "r" or mode == "rt":
+ return io.TextIOWrapper(
+ reader,
+ encoding=encoding,
+ errors=errors,
+ newline=newline,
+ line_buffering=line_buffering,
+ )
+
+ return reader
+
def start_task(self, task_id: TaskID) -> None:
"""Start a task.
| diff --git a/tests/test_progress.py b/tests/test_progress.py
index db2e825d3a..d3c6171c9c 100644
--- a/tests/test_progress.py
+++ b/tests/test_progress.py
@@ -1,11 +1,14 @@
# encoding=utf-8
import io
+import os
+import tempfile
from time import sleep
from types import SimpleNamespace
import pytest
+import rich.progress
from rich.progress_bar import ProgressBar
from rich.console import Console
from rich.highlighter import NullHighlighter
@@ -549,6 +552,84 @@ def test_no_output_if_progress_is_disabled() -> None:
assert result == expected
+def test_open() -> None:
+ console = Console(
+ file=io.StringIO(),
+ force_terminal=True,
+ width=60,
+ color_system="truecolor",
+ legacy_windows=False,
+ _environ={},
+ )
+ progress = Progress(
+ console=console,
+ )
+
+ fd, filename = tempfile.mkstemp()
+ with os.fdopen(fd, "wb") as f:
+ f.write(b"Hello, World!")
+ try:
+ with rich.progress.open(filename) as f:
+ assert f.read() == "Hello, World!"
+ assert f.closed
+ finally:
+ os.remove(filename)
+
+
+def test_open_text_mode() -> None:
+ fd, filename = tempfile.mkstemp()
+ with os.fdopen(fd, "wb") as f:
+ f.write(b"Hello, World!")
+ try:
+ with rich.progress.open(filename, "r") as f:
+ assert f.read() == "Hello, World!"
+ assert f.closed
+ finally:
+ os.remove(filename)
+
+
+def test_wrap_file() -> None:
+ fd, filename = tempfile.mkstemp()
+ with os.fdopen(fd, "wb") as f:
+ total = f.write(b"Hello, World!")
+ try:
+ with open(filename, "rb") as file:
+ with rich.progress.wrap_file(file, total=total) as f:
+ assert f.read() == b"Hello, World!"
+ assert f.closed
+ assert not f.handle.closed
+ assert not file.closed
+ assert file.closed
+ finally:
+ os.remove(filename)
+
+
+def test_wrap_file_task_total() -> None:
+ console = Console(
+ file=io.StringIO(),
+ force_terminal=True,
+ width=60,
+ color_system="truecolor",
+ legacy_windows=False,
+ _environ={},
+ )
+ progress = Progress(
+ console=console,
+ )
+
+ fd, filename = tempfile.mkstemp()
+ with os.fdopen(fd, "wb") as f:
+ total = f.write(b"Hello, World!")
+ try:
+ with progress:
+ with open(filename, "rb") as file:
+ task_id = progress.add_task("Reading", total=total)
+ with progress.wrap_file(file, task_id=task_id) as f:
+ assert f.read() == b"Hello, World!"
+ finally:
+ os.remove(filename)
+
+
if __name__ == "__main__":
_render = render_progress()
print(_render)
| diff --git a/CHANGELOG.md b/CHANGELOG.md
index 596bd99779..d3967faec4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased]
+## 12.1.0
+
+### Added
+
+- Progress.open and Progress.wrap_file method to track the progress while reading from a file or file-like object https://github.com/willmcgugan/rich/pull/1759
### Added
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 3dc31bf805..aca64423dc 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -18,6 +18,7 @@ The following people have contributed to the development of Rich:
- [Finn Hughes](https://github.com/finnhughes)
- [Josh Karpel](https://github.com/JoshKarpel)
- [Andrew Kettmann](https://github.com/akettmann)
+- [Martin Larralde](https://github.com/althonos)
- [Hedy Li](https://github.com/hedythedev)
- [Luka Mamukashvili](https://github.com/UltraStudioLTD)
- [Alexander Mancevice](https://github.com/amancevice)
diff --git a/docs/source/progress.rst b/docs/source/progress.rst
index 4ce3588368..f905b255e5 100644
--- a/docs/source/progress.rst
+++ b/docs/source/progress.rst
@@ -26,6 +26,16 @@ For basic usage call the :func:`~rich.progress.track` function, which accepts a
for n in track(range(n), description="Processing..."):
do_work(n)
+
+To get a progress bar while reading from a file, you may consider using the :func:`~rich.progress.read` function, which accepts a path, or a *file-like* object. It will return a *file-like* object in *binary mode* that will update the progress information as it's being read from. Here's an example, tracking the progresses made by :func:`json.load` to load a file::
+
+ import json
+ from rich.progress import read
+
+ with read("data.json", description="Loading data...") as f:
+ data = json.load(f)
+
+
Advanced usage
--------------
@@ -34,9 +44,9 @@ If you require multiple tasks in the display, or wish to configure the columns i
The Progress class is designed to be used as a *context manager* which will start and stop the progress display automatically.
Here's a simple example::
-
+
import time
-
+
from rich.progress import Progress
with Progress() as progress:
@@ -179,7 +189,7 @@ If you have another Console object you want to use, pass it in to the :class:`~r
with Progress(console=my_console) as progress:
my_console.print("[bold blue]Starting work!")
do_work(progress)
-
+
Redirecting stdout / stderr
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -199,6 +209,42 @@ If the :class:`~rich.progress.Progress` class doesn't offer exactly what you nee
def get_renderables(self):
yield Panel(self.make_tasks_table(self.tasks))
+Reading from a file
+~~~~~~~~~~~~~~~~~~~
+
+You can obtain a progress-tracking reader using the :meth:`~rich.progress.Progress.open` method by giving it a path. You can specify the number of bytes to be read, but by default :meth:`~rich.progress.Progress.open` will query the size of the file with :func:`os.stat`. You are responsible for closing the file, and you should consider using a *context* to make sure it is closed ::
+
+ import json
+ from rich.progress import Progress
+
+ with Progress() as progress:
+ with progress.open("data.json", "rb") as file:
+ json.load(file)
+
+
+Note that in the above snippet we use the `"rb"` mode, because we needed the file to be opened in binary mode to pass it to :func:`json.load`. If the API consuming the file is expecting an object in *text mode* (for instance, :func:`csv.reader`), you can open the file with the `"r"` mode, which happens to be the default ::
+
+ from rich.progress import Progress
+
+ with Progress() as progress:
+ with progress.open("README.md") as file:
+ for line in file:
+ print(line)
+
+
+Reading from a file-like object
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can obtain a progress-tracking reader wrapping a file-like object using the :meth:`~rich.progress.Progress.wrap_file` method. The file-like object must be in *binary mode*, and a total must be provided, unless it was provided to a :class:`~rich.progress.Task` created beforehand. The returned reader may be used in a context, but will not take care of closing the wrapped file ::
+
+ import json
+ from rich.progress import Progress
+
+ with Progress() as progress:
+ with open("data.json", "rb") as file:
+ json.load(progress.wrap_file(file, total=2048))
+
+
Multiple Progress
-----------------
@@ -208,4 +254,3 @@ Example
-------
See `downloader.py <https://github.com/willmcgugan/rich/blob/master/examples/downloader.py>`_ for a realistic application of a progress display. This script can download multiple concurrent files with a progress bar, transfer speed and file size.
-
| [
{
"components": [
{
"doc": "A reader that tracks progress while it's being read from.",
"lines": [
171,
260
],
"name": "_Reader",
"signature": "class _Reader(RawIOBase, BinaryIO):",
"type": "class"
},
{
"doc": "",
... | [
"tests/test_progress.py::test_open",
"tests/test_progress.py::test_open_text_mode",
"tests/test_progress.py::test_wrap_file",
"tests/test_progress.py::test_wrap_file_task_total"
] | [
"tests/test_progress.py::test_bar_columns",
"tests/test_progress.py::test_text_column",
"tests/test_progress.py::test_time_elapsed_column",
"tests/test_progress.py::test_time_remaining_column",
"tests/test_progress.py::test_compact_time_remaining_column[None---:--]",
"tests/test_progress.py::test_compact_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add API to support tracking progress while reading from a file
## Type of changes
- [ ] Bug fix
- [x] New feature
- [ ] Documentation / docstrings
- [ ] Tests
- [ ] Other
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [x] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
## Description
Hi Will! I've started using `rich` more and more, so far it's a great experience :wink:
One of my common use case for the `rich.progress` module is tracking the progress made while reading from a file. In bioinformatics it's not rare to process gigabytes of files so I like to have an idea how long of a break I can take while processing my input data!

Manually updating the progress bar works for certain things, but for instance, when I want to pass a file-like handle to an API that consumes it (e.g `json.load`), there's no way to update the progress bar between each chunk. To do so, the only way I've found was to wrap the file-like object itself so that it updates the progress bar whenever the file is read from. This is something you could partially do with [`tqdm.tqdm.wrapattr`](https://tqdm.github.io/docs/tqdm/#wrapattr).
Since the `rich.progress` API has no straightforward support for this (like it does with `track` for an iterator), I made my own snippet for this, which ends up working well. I've been copying around in every project that uses `rich`, so I think it would make a good addition to the library!
## Changes
This PR adds the `rich.progress._Reader` class, which wraps a file-like object, and updates a `Progress` instance while being read from. The class itself is private; to get a reader, you have to use the new `Progress.read` method, which takes either a path or a file-like object, and returns a file reader with progress support.
This lets you do something like this (e.g. loading JSON-serialized data from a file, using `os.stat` to get the total number of bytes to be read):
```python
import json
from rich.progress import *
with Progress(columns=[BarColumn(), DownloadColumn()]) as progress:
with progress.read("data.json") as f:
data = json.load(f)
```
You can also directly pass a file-like object to `progress.read`, in which case you *must* specify the total:
```python
import json
from rich.progress import *
with Progress(columns=[BarColumn(), DownloadColumn()]) as progress:
with open("data.json", "rb") as f:
data = json.load(progress.read(f, total=2048))
```
In addition, I added a wrapper function `rich.progress.read` like `rich.progress.track` which handles setting up a progress, so that you can get a progress reader in just two lines:
```python
with rich.progress.read("data.json") as f:
data = json.load(f)
```
### Notes
- In `rich.progress.read` and `Progress.read`, I made `total` an `int` instead of a `float`, because it should be a number of bytes so a float would not make sense here.
- If you `seek` the `_Reader`, it will also reset the position of the progress bar to the new position.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/progress.py]
(definition of _Reader:)
class _Reader(RawIOBase, BinaryIO):
"""A reader that tracks progress while it's being read from."""
(definition of _Reader.__init__:)
def __init__( self, handle: BinaryIO, progress: "Progress", task: TaskID, close_handle: bool = True, ) -> None:
(definition of _Reader.__enter__:)
def __enter__(self) -> "_Reader":
(definition of _Reader.__exit__:)
def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None:
(definition of _Reader.__iter__:)
def __iter__(self) -> BinaryIO:
(definition of _Reader.__next__:)
def __next__(self) -> bytes:
(definition of _Reader.closed:)
def closed(self) -> bool:
(definition of _Reader.fileno:)
def fileno(self) -> int:
(definition of _Reader.isatty:)
def isatty(self) -> bool:
(definition of _Reader.readable:)
def readable(self) -> bool:
(definition of _Reader.seekable:)
def seekable(self) -> bool:
(definition of _Reader.writable:)
def writable(self) -> bool:
(definition of _Reader.read:)
def read(self, size: int = -1) -> bytes:
(definition of _Reader.readinto:)
def readinto(self, b: Union[bytearray, memoryview, mmap]):
(definition of _Reader.readline:)
def readline(self, size: int = -1) -> bytes:
(definition of _Reader.readlines:)
def readlines(self, hint: int = -1) -> List[bytes]:
(definition of _Reader.close:)
def close(self) -> None:
(definition of _Reader.seek:)
def seek(self, offset: int, whence: int = 0) -> int:
(definition of _Reader.tell:)
def tell(self) -> int:
(definition of _Reader.write:)
def write(self, s: Any) -> int:
(definition of _ReadContext:)
class _ReadContext(ContextManager[_I], Generic[_I]):
"""A utility class to handle a context for both a reader and a progress."""
(definition of _ReadContext.__init__:)
def __init__(self, progress: "Progress", reader: _I) -> None:
(definition of _ReadContext.__enter__:)
def __enter__(self) -> _I:
(definition of _ReadContext.__exit__:)
def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None:
(definition of wrap_file:)
def wrap_file( file: BinaryIO, total: int, *, description: str = "Reading...", auto_refresh: bool = True, console: Optional[Console] = None, transient: bool = False, get_time: Optional[Callable[[], float]] = None, refresh_per_second: float = 10, style: StyleType = "bar.back", complete_style: StyleType = "bar.complete", finished_style: StyleType = "bar.finished", pulse_style: StyleType = "bar.pulse", disable: bool = False, ) -> ContextManager[BinaryIO]:
"""Read bytes from a file while tracking progress.
Args:
file (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode.
total (int): Total number of bytes to read.
description (str, optional): Description of task show next to progress bar. Defaults to "Reading".
auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
transient: (bool, optional): Clear the progress on exit. Defaults to False.
console (Console, optional): Console to write to. Default creates internal Console instance.
refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
disable (bool, optional): Disable display of progress.
Returns:
ContextManager[BinaryIO]: A context manager yielding a progress reader."""
(definition of open:)
def open( file: Union[str, "PathLike[str]", bytes], mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, *, total: Optional[int] = None, description: str = "Reading...", auto_refresh: bool = True, console: Optional[Console] = None, transient: bool = False, get_time: Optional[Callable[[], float]] = None, refresh_per_second: float = 10, style: StyleType = "bar.back", complete_style: StyleType = "bar.complete", finished_style: StyleType = "bar.finished", pulse_style: StyleType = "bar.pulse", disable: bool = False, ) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]:
"""Read bytes from a file while tracking progress.
Args:
path (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode.
mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
buffering (int): The buffering strategy to use, see :func:`io.open`.
encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`
total: (int, optional): Total number of bytes to read. Must be provided if reading from a file handle. Default for a path is os.stat(file).st_size.
description (str, optional): Description of task show next to progress bar. Defaults to "Reading".
auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
transient: (bool, optional): Clear the progress on exit. Defaults to False.
console (Console, optional): Console to write to. Default creates internal Console instance.
refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
disable (bool, optional): Disable display of progress.
encoding (str, optional): The encoding to use when reading in text mode.
Returns:
ContextManager[BinaryIO]: A context manager yielding a progress reader."""
(definition of Progress.wrap_file:)
def wrap_file( self, file: BinaryIO, total: Optional[int] = None, *, task_id: Optional[TaskID] = None, description: str = "Reading...", ) -> BinaryIO:
"""Track progress file reading from a binary file.
Args:
file (BinaryIO): A file-like object opened in binary mode.
total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
task_id (TaskID): Task to track. Default is new task.
description (str, optional): Description of task, if new task is created.
Returns:
BinaryIO: A readable file-like object in binary mode.
Raises:
ValueError: When no total value can be extracted from the arguments or the task."""
(definition of Progress.open:)
def open( self, file: Union[str, "PathLike[str]", bytes], mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r", buffering: int = -1, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, *, total: Optional[int] = None, task_id: Optional[TaskID] = None, description: str = "Reading...", ) -> Union[BinaryIO, TextIO]:
"""Track progress while reading from a binary file.
Args:
path (Union[str, PathLike[str]]): The path to the file to read.
mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
buffering (int): The buffering strategy to use, see :func:`io.open`.
encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
task_id (TaskID): Task to track. Default is new task.
description (str, optional): Description of task, if new task is created.
Returns:
BinaryIO: A readable file-like object in binary mode.
Raises:
ValueError: When an invalid mode is given."""
[end of new definitions in rich/progress.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | |
matplotlib__matplotlib-21962 | 21,962 | matplotlib/matplotlib | 3.5 | b57506eb00afa4b09cda023e4d76ed06ca976217 | 2021-12-15T11:24:54Z | diff --git a/doc/api/next_api_changes/deprecations/21962-AL.rst b/doc/api/next_api_changes/deprecations/21962-AL.rst
new file mode 100644
index 000000000000..e5ddab747b48
--- /dev/null
+++ b/doc/api/next_api_changes/deprecations/21962-AL.rst
@@ -0,0 +1,5 @@
+``backend_pgf``
+~~~~~~~~~~~~~~~
+The following API elements have been deprecated with no
+replacement: ``NO_ESCAPE``, ``re_mathsep``, ``get_fontspec``, ``get_preamble``,
+``common_texification``, ``writeln``.
diff --git a/lib/matplotlib/backends/backend_pgf.py b/lib/matplotlib/backends/backend_pgf.py
index 334cd6a05f16..d0341bc36e89 100644
--- a/lib/matplotlib/backends/backend_pgf.py
+++ b/lib/matplotlib/backends/backend_pgf.py
@@ -36,58 +36,69 @@
# which is not recognized by TeX.
-def get_fontspec():
- """Build fontspec preamble from rc."""
- latex_fontspec = []
- texcommand = mpl.rcParams["pgf.texsystem"]
-
- if texcommand != "pdflatex":
- latex_fontspec.append("\\usepackage{fontspec}")
+@_api.caching_module_getattr
+class __getattr__:
+ NO_ESCAPE = _api.deprecated("3.6", obj_type="")(
+ property(lambda self: _NO_ESCAPE))
+ re_mathsep = _api.deprecated("3.6", obj_type="")(
+ property(lambda self: _split_math.__self__))
- if texcommand != "pdflatex" and mpl.rcParams["pgf.rcfonts"]:
- families = ["serif", "sans\\-serif", "monospace"]
- commands = ["setmainfont", "setsansfont", "setmonofont"]
- for family, command in zip(families, commands):
- # 1) Forward slashes also work on Windows, so don't mess with
- # backslashes. 2) The dirname needs to include a separator.
- path = pathlib.Path(fm.findfont(family))
- latex_fontspec.append(r"\%s{%s}[Path=\detokenize{%s}]" % (
- command, path.name, path.parent.as_posix() + "/"))
- return "\n".join(latex_fontspec)
+@_api.deprecated("3.6")
+def get_fontspec():
+ """Build fontspec preamble from rc."""
+ with mpl.rc_context({"pgf.preamble": ""}):
+ return _get_preamble()
+@_api.deprecated("3.6")
def get_preamble():
"""Get LaTeX preamble from rc."""
return mpl.rcParams["pgf.preamble"]
-###############################################################################
-# This almost made me cry!!!
-# In the end, it's better to use only one unit for all coordinates, since the
+def _get_preamble():
+ """Prepare a LaTeX preamble based on the rcParams configuration."""
+ preamble = [mpl.rcParams["pgf.preamble"]]
+ if mpl.rcParams["pgf.texsystem"] != "pdflatex":
+ preamble.append("\\usepackage{fontspec}")
+ if mpl.rcParams["pgf.rcfonts"]:
+ families = ["serif", "sans\\-serif", "monospace"]
+ commands = ["setmainfont", "setsansfont", "setmonofont"]
+ for family, command in zip(families, commands):
+ # 1) Forward slashes also work on Windows, so don't mess with
+ # backslashes. 2) The dirname needs to include a separator.
+ path = pathlib.Path(fm.findfont(family))
+ preamble.append(r"\%s{%s}[Path=\detokenize{%s/}]" % (
+ command, path.name, path.parent.as_posix()))
+ return "\n".join(preamble)
+
+
+# It's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
-###############################################################################
-# helper functions
-
-NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
-re_mathsep = re.compile(NO_ESCAPE + r"\$")
-
+_NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
+_split_math = re.compile(_NO_ESCAPE + r"\$").split
_replace_escapetext = functools.partial(
# When the next character is _, ^, $, or % (not preceded by an escape),
# insert a backslash.
- re.compile(NO_ESCAPE + "(?=[_^$%])").sub, "\\\\")
+ re.compile(_NO_ESCAPE + "(?=[_^$%])").sub, "\\\\")
_replace_mathdefault = functools.partial(
# Replace \mathdefault (when not preceded by an escape) by empty string.
- re.compile(NO_ESCAPE + r"(\\mathdefault)").sub, "")
+ re.compile(_NO_ESCAPE + r"(\\mathdefault)").sub, "")
+@_api.deprecated("3.6")
def common_texification(text):
+ return _tex_escape(text)
+
+
+def _tex_escape(text):
r"""
Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
@@ -103,7 +114,7 @@ def common_texification(text):
text = _replace_mathdefault(text)
text = text.replace("\N{MINUS SIGN}", r"\ensuremath{-}")
# split text into normaltext and inline math parts
- parts = re_mathsep.split(text)
+ parts = _split_math(text)
for i, s in enumerate(parts):
if not i % 2:
# textmode replacements
@@ -115,7 +126,12 @@ def common_texification(text):
return "".join(parts)
+@_api.deprecated("3.6")
def writeln(fh, line):
+ return _writeln(fh, line)
+
+
+def _writeln(fh, line):
# Ending lines with a % prevents TeX from inserting spurious spaces
# (https://tex.stackexchange.com/questions/7453).
fh.write(line)
@@ -152,7 +168,7 @@ def _escape_and_apply_props(s, prop):
commands.append(r"\bfseries")
commands.append(r"\selectfont")
- return "".join(commands) + " " + common_texification(s)
+ return "".join(commands) + " " + _tex_escape(s)
def _metadata_to_str(key, value):
@@ -212,8 +228,6 @@ class LatexManager:
@staticmethod
def _build_latex_header():
- latex_preamble = get_preamble()
- latex_fontspec = get_fontspec()
# Create LaTeX header with some content, else LaTeX will load some math
# fonts later when we don't expect the additional output on stdout.
# TODO: is this sufficient?
@@ -224,8 +238,7 @@ def _build_latex_header():
rf"% !TeX program = {mpl.rcParams['pgf.texsystem']}",
# Test whether \includegraphics supports interpolate option.
r"\usepackage{graphicx}",
- latex_preamble,
- latex_fontspec,
+ _get_preamble(),
r"\begin{document}",
r"text $math \mu$", # force latex to load fonts now
r"\typeout{pgf_backend_query_start}",
@@ -400,7 +413,7 @@ def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# docstring inherited
- writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
@@ -412,13 +425,13 @@ def draw_markers(self, gc, marker_path, marker_trans, path, trans,
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
- writeln(self.fh,
- r"\pgfsys@defobject{currentmarker}"
- r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
+ _writeln(self.fh,
+ r"\pgfsys@defobject{currentmarker}"
+ r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(None, marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
- writeln(self.fh, r"}")
+ _writeln(self.fh, r"}")
maxcoord = 16383 / 72.27 * self.dpi # Max dimensions in LaTeX.
clip = (-maxcoord, -maxcoord, maxcoord, maxcoord)
@@ -427,48 +440,48 @@ def draw_markers(self, gc, marker_path, marker_trans, path, trans,
for point, code in path.iter_segments(trans, simplify=False,
clip=clip):
x, y = point[0] * f, point[1] * f
- writeln(self.fh, r"\begin{pgfscope}")
- writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
- writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
- writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
+ _writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
+ _writeln(self.fh, r"\end{pgfscope}")
- writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
- writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(gc, path, transform, rgbFace)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
- writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
- writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_path_styles(gc, rgbFace)
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(gc, path, transform, rgbFace)
- writeln(self.fh, r"\pgfusepath{clip}")
+ _writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
- writeln(self.fh,
- r"\pgfsys@defobject{currentpattern}"
- r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
- writeln(self.fh, r"\begin{pgfscope}")
- writeln(self.fh,
- r"\pgfpathrectangle"
- r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
- writeln(self.fh, r"\pgfusepath{clip}")
+ _writeln(self.fh,
+ r"\pgfsys@defobject{currentpattern}"
+ r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
+ _writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh,
+ r"\pgfpathrectangle"
+ r"{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
+ _writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(None, gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
- writeln(self.fh, r"\end{pgfscope}")
- writeln(self.fh, r"}")
+ _writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = \
@@ -476,16 +489,16 @@ def draw_path(self, gc, path, transform, rgbFace=None):
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = math.ceil(xmax - xmin), math.ceil(ymax - ymin)
- writeln(self.fh,
- r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
+ _writeln(self.fh,
+ r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
- writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
- writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
- writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
- writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
+ _writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
+ _writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
+ _writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
+ _writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
- writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
@@ -495,30 +508,30 @@ def _print_pgf_clip(self, gc):
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
- writeln(self.fh,
- r"\pgfpathrectangle"
- r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
- % coords)
- writeln(self.fh, r"\pgfusepath{clip}")
+ _writeln(self.fh,
+ r"\pgfpathrectangle"
+ r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
+ % coords)
+ _writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(gc, clippath, clippath_trans)
- writeln(self.fh, r"\pgfusepath{clip}")
+ _writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
- writeln(self.fh, capstyles[gc.get_capstyle()])
+ _writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
- writeln(self.fh, joinstyles[gc.get_joinstyle()])
+ _writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
@@ -530,33 +543,33 @@ def _print_pgf_path_styles(self, gc, rgbFace):
fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
if has_fill:
- writeln(self.fh,
- r"\definecolor{currentfill}{rgb}{%f,%f,%f}"
- % tuple(rgbFace[:3]))
- writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
+ _writeln(self.fh,
+ r"\definecolor{currentfill}{rgb}{%f,%f,%f}"
+ % tuple(rgbFace[:3]))
+ _writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and fillopacity != 1.0:
- writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
+ _writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
- writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
- writeln(self.fh,
- r"\definecolor{currentstroke}{rgb}{%f,%f,%f}"
- % stroke_rgba[:3])
- writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
+ _writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
+ _writeln(self.fh,
+ r"\definecolor{currentstroke}{rgb}{%f,%f,%f}"
+ % stroke_rgba[:3])
+ _writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if strokeopacity != 1.0:
- writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
+ _writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
# line style
dash_offset, dash_list = gc.get_dashes()
if dash_list is None:
- writeln(self.fh, r"\pgfsetdash{}{0pt}")
+ _writeln(self.fh, r"\pgfsetdash{}{0pt}")
else:
- writeln(self.fh,
- r"\pgfsetdash{%s}{%fpt}"
- % ("".join(r"{%fpt}" % dash for dash in dash_list),
- dash_offset))
+ _writeln(self.fh,
+ r"\pgfsetdash{%s}{%fpt}"
+ % ("".join(r"{%fpt}" % dash for dash in dash_list),
+ dash_offset))
def _print_pgf_path(self, gc, path, transform, rgbFace=None):
f = 1. / self.dpi
@@ -573,32 +586,32 @@ def _print_pgf_path(self, gc, path, transform, rgbFace=None):
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
x, y = tuple(points)
- writeln(self.fh,
- r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
- (f * x, f * y))
+ _writeln(self.fh,
+ r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
+ (f * x, f * y))
elif code == Path.CLOSEPOLY:
- writeln(self.fh, r"\pgfpathclose")
+ _writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
- writeln(self.fh,
- r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
- (f * x, f * y))
+ _writeln(self.fh,
+ r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
+ (f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
- writeln(self.fh,
- r"\pgfpathquadraticcurveto"
- r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
- % coords)
+ _writeln(self.fh,
+ r"\pgfpathquadraticcurveto"
+ r"{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}"
+ % coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
- writeln(self.fh,
- r"\pgfpathcurveto"
- r"{\pgfqpoint{%fin}{%fin}}"
- r"{\pgfqpoint{%fin}{%fin}}"
- r"{\pgfqpoint{%fin}{%fin}}"
- % coords)
+ _writeln(self.fh,
+ r"\pgfpathcurveto"
+ r"{\pgfqpoint{%fin}{%fin}}"
+ r"{\pgfqpoint{%fin}{%fin}}"
+ r"{\pgfqpoint{%fin}{%fin}}"
+ % coords)
# apply pgf decorators
sketch_params = gc.get_sketch_params() if gc else None
@@ -616,13 +629,13 @@ def _print_pgf_path(self, gc, path, transform, rgbFace=None):
length *= 0.5
scale *= 2
# PGF guarantees that repeated loading is a no-op
- writeln(self.fh, r"\usepgfmodule{decorations}")
- writeln(self.fh, r"\usepgflibrary{decorations.pathmorphing}")
- writeln(self.fh, r"\pgfkeys{/pgf/decoration/.cd, "
- f"segment length = {(length * f):f}in, "
- f"amplitude = {(scale * f):f}in}}")
- writeln(self.fh, f"\\pgfmathsetseed{{{int(randomness)}}}")
- writeln(self.fh, r"\pgfdecoratecurrentpath{random steps}")
+ _writeln(self.fh, r"\usepgfmodule{decorations}")
+ _writeln(self.fh, r"\usepgflibrary{decorations.pathmorphing}")
+ _writeln(self.fh, r"\pgfkeys{/pgf/decoration/.cd, "
+ f"segment length = {(length * f):f}in, "
+ f"amplitude = {(scale * f):f}in}}")
+ _writeln(self.fh, f"\\pgfmathsetseed{{{int(randomness)}}}")
+ _writeln(self.fh, r"\pgfdecoratecurrentpath{random steps}")
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
@@ -630,7 +643,7 @@ def _pgf_path_draw(self, stroke=True, fill=False):
actions.append("stroke")
if fill:
actions.append("fill")
- writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
+ _writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def option_scale_image(self):
# docstring inherited
@@ -659,27 +672,27 @@ def draw_image(self, gc, x, y, im, transform=None):
self.image_counter += 1
# reference the image in the pgf picture
- writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
f = 1. / self.dpi # from display coords to inch
if transform is None:
- writeln(self.fh,
- r"\pgfsys@transformshift{%fin}{%fin}" % (x * f, y * f))
+ _writeln(self.fh,
+ r"\pgfsys@transformshift{%fin}{%fin}" % (x * f, y * f))
w, h = w * f, h * f
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
- writeln(self.fh,
- r"\pgfsys@transformcm{%f}{%f}{%f}{%f}{%fin}{%fin}" %
- (tr1 * f, tr2 * f, tr3 * f, tr4 * f,
- (tr5 + x) * f, (tr6 + y) * f))
+ _writeln(self.fh,
+ r"\pgfsys@transformcm{%f}{%f}{%f}{%f}{%fin}{%fin}" %
+ (tr1 * f, tr2 * f, tr3 * f, tr4 * f,
+ (tr5 + x) * f, (tr6 + y) * f))
w = h = 1 # scale is already included in the transform
interp = str(transform is None).lower() # interpolation in PDF reader
- writeln(self.fh,
- r"\pgftext[left,bottom]"
- r"{%s[interpolate=%s,width=%fin,height=%fin]{%s}}" %
- (_get_image_inclusion_command(),
- interp, w, h, fname_img))
- writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh,
+ r"\pgftext[left,bottom]"
+ r"{%s[interpolate=%s,width=%fin,height=%fin]{%s}}" %
+ (_get_image_inclusion_command(),
+ interp, w, h, fname_img))
+ _writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX", mtext=None):
# docstring inherited
@@ -691,16 +704,16 @@ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# prepare string for tex
s = _escape_and_apply_props(s, prop)
- writeln(self.fh, r"\begin{pgfscope}")
+ _writeln(self.fh, r"\begin{pgfscope}")
alpha = gc.get_alpha()
if alpha != 1.0:
- writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
- writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
+ _writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
+ _writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
rgb = tuple(gc.get_rgb())[:3]
- writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
- writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
- writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
+ _writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
+ _writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
+ _writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
s = r"\color{textcolor}" + s
dpi = self.figure.dpi
@@ -729,8 +742,8 @@ def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if angle != 0:
text_args.append("rotate=%f" % angle)
- writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
- writeln(self.fh, r"\end{pgfscope}")
+ _writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
+ _writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# docstring inherited
@@ -816,9 +829,7 @@ def _print_pgf_to_fh(self, fh, *, bbox_inches_restore=None):
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
- for line in get_preamble().splitlines():
- header_info_preamble.append("%% " + line)
- for line in get_fontspec().splitlines():
+ for line in _get_preamble().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
@@ -831,22 +842,22 @@ def _print_pgf_to_fh(self, fh, *, bbox_inches_restore=None):
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
- writeln(fh, r"\begingroup")
- writeln(fh, r"\makeatletter")
- writeln(fh, r"\begin{pgfpicture}")
- writeln(fh,
- r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}"
- % (w, h))
- writeln(fh, r"\pgfusepath{use as bounding box, clip}")
+ _writeln(fh, r"\begingroup")
+ _writeln(fh, r"\makeatletter")
+ _writeln(fh, r"\begin{pgfpicture}")
+ _writeln(fh,
+ r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}"
+ % (w, h))
+ _writeln(fh, r"\pgfusepath{use as bounding box, clip}")
renderer = MixedModeRenderer(self.figure, w, h, dpi,
RendererPgf(self.figure, fh),
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
# end the pgfpicture environment
- writeln(fh, r"\end{pgfpicture}")
- writeln(fh, r"\makeatother")
- writeln(fh, r"\endgroup")
+ _writeln(fh, r"\end{pgfpicture}")
+ _writeln(fh, r"\makeatother")
+ _writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, **kwargs):
"""
@@ -877,8 +888,7 @@ def print_pdf(self, fname_or_fh, *, metadata=None, **kwargs):
r"\documentclass[12pt]{minimal}",
r"\usepackage[papersize={%fin,%fin}, margin=0in]{geometry}"
% (w, h),
- get_preamble(),
- get_fontspec(),
+ _get_preamble(),
r"\usepackage{pgf}",
r"\begin{document}",
r"\centering",
@@ -989,8 +999,7 @@ def _write_header(self, width_inches, height_inches):
r"\documentclass[12pt]{minimal}",
r"\usepackage[papersize={%fin,%fin}, margin=0in]{geometry}"
% (width_inches, height_inches),
- get_preamble(),
- get_fontspec(),
+ _get_preamble(),
r"\usepackage{pgf}",
r"\setlength{\parindent}{0pt}",
r"\begin{document}%",
| diff --git a/lib/matplotlib/tests/test_backend_pgf.py b/lib/matplotlib/tests/test_backend_pgf.py
index 9b5b0b28ee3f..db094f1d0a8d 100644
--- a/lib/matplotlib/tests/test_backend_pgf.py
+++ b/lib/matplotlib/tests/test_backend_pgf.py
@@ -11,7 +11,7 @@
import matplotlib.pyplot as plt
from matplotlib.testing import _has_tex_package, _check_for_pgf
from matplotlib.testing.compare import compare_images, ImageComparisonFailure
-from matplotlib.backends.backend_pgf import PdfPages, common_texification
+from matplotlib.backends.backend_pgf import PdfPages, _tex_escape
from matplotlib.testing.decorators import (_image_directories,
check_figures_equal,
image_comparison)
@@ -73,8 +73,8 @@ def create_figure():
('% not a comment', r'\% not a comment'),
('^not', r'\^not'),
])
-def test_common_texification(plain_text, escaped_text):
- assert common_texification(plain_text) == escaped_text
+def test_tex_escape(plain_text, escaped_text):
+ assert _tex_escape(plain_text) == escaped_text
# test compiling a figure to pdf with xelatex
| diff --git a/doc/api/next_api_changes/deprecations/21962-AL.rst b/doc/api/next_api_changes/deprecations/21962-AL.rst
new file mode 100644
index 000000000000..e5ddab747b48
--- /dev/null
+++ b/doc/api/next_api_changes/deprecations/21962-AL.rst
@@ -0,0 +1,5 @@
+``backend_pgf``
+~~~~~~~~~~~~~~~
+The following API elements have been deprecated with no
+replacement: ``NO_ESCAPE``, ``re_mathsep``, ``get_fontspec``, ``get_preamble``,
+``common_texification``, ``writeln``.
| [
{
"components": [
{
"doc": "",
"lines": [
40,
44
],
"name": "__getattr__",
"signature": "class __getattr__:",
"type": "class"
},
{
"doc": "Prepare a LaTeX preamble based on the rcParams configuration.",
"lines"... | [
"lib/matplotlib/tests/test_backend_pgf.py::test_tex_escape[quad_sum:",
"lib/matplotlib/tests/test_backend_pgf.py::test_tex_escape[no",
"lib/matplotlib/tests/test_backend_pgf.py::test_tex_escape[with_underscores-with\\\\_underscores]",
"lib/matplotlib/tests/test_backend_pgf.py::test_tex_escape[%",
"lib/matpl... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Privatize various internal APIs of backend_pgf.
... and merge the now private get_preamble and get_fontspec together.
## PR Summary
## PR Checklist
<!-- Please mark any checkboxes that do not apply to this PR as [N/A]. -->
**Tests and Styling**
- [ ] Has pytest style unit tests (and `pytest` passes).
- [ ] Is [Flake 8](https://flake8.pycqa.org/en/latest/) compliant (install `flake8-docstrings` and run `flake8 --docstring-convention=all`).
**Documentation**
- [ ] New features are documented, with examples if plot related.
- [ ] New features have an entry in `doc/users/next_whats_new/` (follow instructions in README.rst there).
- [ ] API changes documented in `doc/api/next_api_changes/` (follow instructions in README.rst there).
- [ ] Documentation is sphinx and numpydoc compliant (the docs should [build](https://matplotlib.org/devel/documenting_mpl.html#building-the-docs) without error).
<!--
Thank you so much for your PR! To help us review your contribution, please
consider the following points:
- A development guide is available at https://matplotlib.org/devdocs/devel/index.html.
- Help with git and github is available at
https://matplotlib.org/devel/gitwash/development_workflow.html.
- Do not create the PR out of main, but out of a separate branch.
- The PR title should summarize the changes, for example "Raise ValueError on
non-numeric input to set_xlim". Avoid non-descriptive titles such as
"Addresses issue #8576".
- The summary should provide at least 1-2 sentences describing the pull request
in detail (Why is this change required? What problem does it solve?) and
link to any relevant issues.
- If you are contributing fixes to docstrings, please pay attention to
http://matplotlib.org/devel/documenting_mpl.html#formatting. In particular,
note the difference between using single backquotes, double backquotes, and
asterisks in the markup.
We understand that PRs can sometimes be overwhelming, especially as the
reviews start coming in. Please let us know if the reviews are unclear or
the recommended next step seems overly demanding, if you would like help in
addressing a reviewer's comments, or if you have been waiting too long to hear
back on your PR.
-->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in lib/matplotlib/backends/backend_pgf.py]
(definition of __getattr__:)
class __getattr__:
(definition of _get_preamble:)
def _get_preamble():
"""Prepare a LaTeX preamble based on the rcParams configuration."""
(definition of _tex_escape:)
def _tex_escape(text):
"""Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
This distinguishes text-mode and math-mode by replacing the math separator
``$`` with ``\(\displaystyle %s\)``. Escaped math separators (``\$``)
are ignored.
The following characters are escaped in text segments: ``_^$%``"""
(definition of _writeln:)
def _writeln(fh, line):
[end of new definitions in lib/matplotlib/backends/backend_pgf.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3d6c3da884fafae4654df68144391cfe9be6f134 | |
sympy__sympy-22661 | 22,661 | sympy/sympy | 1.10 | 81cd2630631e315586f796646a7f9268e71bf484 | 2021-12-13T09:44:19Z | diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py
index c748ebbf5d62..c370bc216353 100644
--- a/sympy/printing/latex.py
+++ b/sympy/printing/latex.py
@@ -2029,7 +2029,7 @@ def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
return '\\text{Domain: }' + self._print(d.as_boolean())
elif hasattr(d, 'set'):
- return ('\\text{Domain: }' + self._print(d.symbols) + '\\text{ in }' +
+ return ('\\text{Domain: }' + self._print(d.symbols) + ' \\in ' +
self._print(d.set))
elif hasattr(d, 'symbols'):
return '\\text{Domain on }' + self._print(d.symbols)
@@ -2470,6 +2470,18 @@ def _print_LambertW(self, expr, exp=None):
result = "W{0}_{{{1}}}\\left({2}\\right)".format(exp, arg1, arg0)
return result
+ def _print_Expectation(self, expr):
+ return r"\operatorname{{E}}\left[{}\right]".format(self._print(expr.args[0]))
+
+ def _print_Variance(self, expr):
+ return r"\operatorname{{Var}}\left({}\right)".format(self._print(expr.args[0]))
+
+ def _print_Covariance(self, expr):
+ return r"\operatorname{{Cov}}\left({}\right)".format(", ".join(self._print(arg) for arg in expr.args))
+
+ def _print_Probability(self, expr):
+ return r"\operatorname{{P}}\left({}\right)".format(self._print(expr.args[0]))
+
def _print_Morphism(self, morphism):
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py
index 86bc76893647..6a4d258e27a7 100644
--- a/sympy/printing/pretty/pretty.py
+++ b/sympy/printing/pretty/pretty.py
@@ -766,13 +766,19 @@ def _print_MatrixBase(self, e):
def _print_TensorProduct(self, expr):
# This should somehow share the code with _print_WedgeProduct:
- circled_times = "\u2297"
+ if self._use_unicode:
+ circled_times = "\u2297"
+ else:
+ circled_times = ".*"
return self._print_seq(expr.args, None, None, circled_times,
parenthesize=lambda x: precedence_traditional(x) <= PRECEDENCE["Mul"])
def _print_WedgeProduct(self, expr):
# This should somehow share the code with _print_TensorProduct:
- wedge_symbol = "\u2227"
+ if self._use_unicode:
+ wedge_symbol = "\u2227"
+ else:
+ wedge_symbol = '/\\'
return self._print_seq(expr.args, None, None, wedge_symbol,
parenthesize=lambda x: precedence_traditional(x) <= PRECEDENCE["Mul"])
@@ -1546,10 +1552,11 @@ def _print_ExpBase(self, e):
def _print_Exp1(self, e):
return prettyForm(pretty_atom('Exp1', 'e'))
- def _print_Function(self, e, sort=False, func_name=None):
+ def _print_Function(self, e, sort=False, func_name=None, left='(',
+ right=')'):
# optional argument func_name for supplying custom names
# XXX works only for applied functions
- return self._helper_print_function(e.func, e.args, sort=sort, func_name=func_name)
+ return self._helper_print_function(e.func, e.args, sort=sort, func_name=func_name, left=left, right=right)
def _print_mathieuc(self, e):
return self._print_Function(e, func_name='C')
@@ -1563,7 +1570,9 @@ def _print_mathieucprime(self, e):
def _print_mathieusprime(self, e):
return self._print_Function(e, func_name="S'")
- def _helper_print_function(self, func, args, sort=False, func_name=None, delimiter=', ', elementwise=False):
+ def _helper_print_function(self, func, args, sort=False, func_name=None,
+ delimiter=', ', elementwise=False, left='(',
+ right=')'):
if sort:
args = sorted(args, key=default_sort_key)
@@ -1586,7 +1595,8 @@ def _helper_print_function(self, func, args, sort=False, func_name=None, delimit
*stringPict.next(prettyFunc, circ)
)
- prettyArgs = prettyForm(*self._print_seq(args, delimiter=delimiter).parens())
+ prettyArgs = prettyForm(*self._print_seq(args, delimiter=delimiter).parens(
+ left=left, right=right))
pform = prettyForm(
binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))
@@ -1671,6 +1681,18 @@ def _print_airybiprime(self, e):
def _print_LambertW(self, e):
return self._print_Function(e, func_name="W")
+ def _print_Covariance(self, e):
+ return self._print_Function(e, func_name="Cov")
+
+ def _print_Variance(self, e):
+ return self._print_Function(e, func_name="Var")
+
+ def _print_Probability(self, e):
+ return self._print_Function(e, func_name="P")
+
+ def _print_Expectation(self, e):
+ return self._print_Function(e, func_name="E", left='[', right=']')
+
def _print_Lambda(self, e):
expr = e.expr
sig = e.signature
@@ -2749,14 +2771,18 @@ def _print_BaseVectorField(self, field):
return self._print(pretty_symbol(s))
def _print_Differential(self, diff):
+ if self._use_unicode:
+ d = '\N{DOUBLE-STRUCK ITALIC SMALL D}'
+ else:
+ d = 'd'
field = diff._form_field
if hasattr(field, '_coord_sys'):
string = field._coord_sys.symbols[field._index].name
- return self._print('\N{DOUBLE-STRUCK ITALIC SMALL D} ' + pretty_symbol(string))
+ return self._print(d + ' ' + pretty_symbol(string))
else:
pform = self._print(field)
pform = prettyForm(*pform.parens())
- return prettyForm(*pform.left("\N{DOUBLE-STRUCK ITALIC SMALL D}"))
+ return prettyForm(*pform.left(d))
def _print_Tr(self, p):
#TODO: Handle indices
| diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py
index 35f4a89046d5..5d19d704fe8d 100644
--- a/sympy/printing/pretty/tests/test_pretty.py
+++ b/sympy/printing/pretty/tests/test_pretty.py
@@ -66,6 +66,9 @@
from sympy.sets import ImageSet, ProductSet
from sympy.sets.setexpr import SetExpr
+from sympy.stats.crv_types import Normal
+from sympy.stats.symbolic_probability import (Covariance, Expectation,
+ Probability, Variance)
from sympy.tensor.array import (ImmutableDenseNDimArray, ImmutableSparseNDimArray,
MutableDenseNDimArray, MutableSparseNDimArray, tensorproduct)
from sympy.tensor.functions import TensorProduct
@@ -3608,6 +3611,7 @@ def test_diffgeom_print_WedgeProduct():
from sympy.diffgeom import WedgeProduct
wp = WedgeProduct(R2.dx, R2.dy)
assert upretty(wp) == "ⅆ x∧ⅆ y"
+ assert pretty(wp) == r"d x/\d y"
def test_Adjoint():
@@ -7628,6 +7632,7 @@ def test_issue_17857():
assert pretty(Range(-oo, oo)) == '{..., -1, 0, 1, ...}'
assert pretty(Range(oo, -oo, -1)) == '{..., 1, 0, -1, ...}'
+
def test_issue_18272():
x = Symbol('x')
n = Symbol('n')
@@ -7658,6 +7663,19 @@ def test_Str():
from sympy.core.symbol import Str
assert pretty(Str('x')) == 'x'
+
+
+def test_symbolic_probability():
+ mu = symbols("mu")
+ sigma = symbols("sigma", positive=True)
+ X = Normal("X", mu, sigma)
+ assert pretty(Expectation(X)) == r'E[X]'
+ assert pretty(Variance(X)) == r'Var(X)'
+ assert pretty(Probability(X > 0)) == r'P(X > 0)'
+ Y = Normal("Y", mu, sigma)
+ assert pretty(Covariance(X, Y)) == 'Cov(X, Y)'
+
+
def test_diffgeom():
from sympy.diffgeom import Manifold, Patch, CoordSystem, BaseScalarField
x,y = symbols('x y', real=True)
diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py
index c344a4c8c716..e2f4ebfa8307 100644
--- a/sympy/printing/tests/test_latex.py
+++ b/sympy/printing/tests/test_latex.py
@@ -63,6 +63,9 @@
from sympy.sets.fancysets import (ComplexRegion, ImageSet, Range)
from sympy.sets.sets import (FiniteSet, Interval, Union, Intersection, Complement, SymmetricDifference, ProductSet)
from sympy.sets.setexpr import SetExpr
+from sympy.stats.crv_types import Normal
+from sympy.stats.symbolic_probability import (Covariance, Expectation,
+ Probability, Variance)
from sympy.tensor.array import (ImmutableDenseNDimArray,
ImmutableSparseNDimArray,
MutableSparseNDimArray,
@@ -1735,7 +1738,7 @@ def test_latex_RandomDomain():
r"\text{Domain: }0 \leq a \wedge 0 \leq b \wedge a < \infty \wedge b < \infty"
assert latex(RandomDomain(FiniteSet(x), FiniteSet(1, 2))) == \
- r'\text{Domain: }\left\{x\right\}\text{ in }\left\{1, 2\right\}'
+ r'\text{Domain: }\left\{x\right\} \in \left\{1, 2\right\}'
def test_PrettyPoly():
from sympy.polys.domains import QQ
@@ -2614,6 +2617,17 @@ def test_issue_15353():
r'\cos{\left(a x \right)} = 0 \right\}'
+def test_latex_symbolic_probability():
+ mu = symbols("mu")
+ sigma = symbols("sigma", positive=True)
+ X = Normal("X", mu, sigma)
+ assert latex(Expectation(X)) == r'\operatorname{E}\left[X\right]'
+ assert latex(Variance(X)) == r'\operatorname{Var}\left(X\right)'
+ assert latex(Probability(X > 0)) == r'\operatorname{P}\left(X > 0\right)'
+ Y = Normal("Y", mu, sigma)
+ assert latex(Covariance(X, Y)) == r'\operatorname{Cov}\left(X, Y\right)'
+
+
def test_trace():
# Issue 15303
from sympy.matrices.expressions.trace import trace
| [
{
"components": [
{
"doc": "",
"lines": [
2473,
2474
],
"name": "LatexPrinter._print_Expectation",
"signature": "def _print_Expectation(self, expr):",
"type": "function"
},
{
"doc": "",
"lines": [
247... | [
"test_diffgeom_print_WedgeProduct",
"test_symbolic_probability",
"test_latex_RandomDomain",
"test_latex_symbolic_probability"
] | [
"test_pretty_ascii_str",
"test_pretty_unicode_str",
"test_upretty_greek",
"test_upretty_multiindex",
"test_upretty_sub_super",
"test_upretty_subs_missing_in_24",
"test_missing_in_2X_issue_9047",
"test_upretty_modifiers",
"test_pretty_Cycle",
"test_pretty_Permutation",
"test_pretty_basic",
"tes... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
LaTeX and pretty printing for symbolic statistics
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
Related to #22654
#### Brief description of what is fixed or changed
`Covariance`, `Expectation`, `Probability` and `Variance` now have dedicated LaTex and pretty printing.


#### Other comments
Also changed so that a random domain prints using the `\in` symbol rather than the text `in`.
Added some rudimentary ASCII pretty-printing option for `Differential` (`d`), `WedgeProduct` (`/\`), and `TensorProduct` (`.*`). The last is a bit doubtful, but probably better than an error...
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
* printing
* Dedicated LaTeX and pretty printing for `Covariance`, `Expectation`, `Probability` and `Variance`.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/printing/latex.py]
(definition of LatexPrinter._print_Expectation:)
def _print_Expectation(self, expr):
(definition of LatexPrinter._print_Variance:)
def _print_Variance(self, expr):
(definition of LatexPrinter._print_Covariance:)
def _print_Covariance(self, expr):
(definition of LatexPrinter._print_Probability:)
def _print_Probability(self, expr):
[end of new definitions in sympy/printing/latex.py]
[start of new definitions in sympy/printing/pretty/pretty.py]
(definition of PrettyPrinter._print_Covariance:)
def _print_Covariance(self, e):
(definition of PrettyPrinter._print_Variance:)
def _print_Variance(self, e):
(definition of PrettyPrinter._print_Probability:)
def _print_Probability(self, e):
(definition of PrettyPrinter._print_Expectation:)
def _print_Expectation(self, e):
[end of new definitions in sympy/printing/pretty/pretty.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3e8695add7a25c8d70aeba7d6137496df02863fd | ||
RDFLib__rdflib-1502 | 1,502 | RDFLib/rdflib | null | 4f3b499c80f6d7de127cbf07e8f6b1b50789a087 | 2021-12-13T00:54:19Z | diff --git a/rdflib/graph.py b/rdflib/graph.py
index 87097b1c9..da18a2554 100644
--- a/rdflib/graph.py
+++ b/rdflib/graph.py
@@ -1528,6 +1528,59 @@ def do_de_skolemize2(t):
return retval
+ def cbd(self, resource):
+ """Retrieves the Concise Bounded Description of a Resource from a Graph
+
+ Concise Bounded Description (CBD) is defined in [1] as:
+
+ Given a particular node (the starting node) in a particular RDF graph (the source graph), a subgraph of that
+ particular graph, taken to comprise a concise bounded description of the resource denoted by the starting node,
+ can be identified as follows:
+
+ 1. Include in the subgraph all statements in the source graph where the subject of the statement is the
+ starting node;
+
+ 2. Recursively, for all statements identified in the subgraph thus far having a blank node object, include
+ in the subgraph all statements in the source graph where the subject of the statement is the blank node
+ in question and which are not already included in the subgraph.
+
+ 3. Recursively, for all statements included in the subgraph thus far, for all reifications of each statement
+ in the source graph, include the concise bounded description beginning from the rdf:Statement node of
+ each reification.
+
+ This results in a subgraph where the object nodes are either URI references, literals, or blank nodes not
+ serving as the subject of any statement in the graph.
+
+ [1] https://www.w3.org/Submission/CBD/
+
+ :param resource: a URIRef object, of the Resource for queried for
+ :return: a Graph, subgraph of self
+
+ """
+ subgraph = Graph()
+
+ def add_to_cbd(uri):
+ for s, p, o in self.triples((uri, None, None)):
+ subgraph.add((s, p, o))
+ # recurse 'down' through ll Blank Nodes
+ if type(o) == BNode and not (o, None, None) in subgraph:
+ add_to_cbd(o)
+
+ # for Rule 3 (reification)
+ # for any rdf:Statement in the graph with the given URI as the object of rdf:subject,
+ # get all triples with that rdf:Statement instance as subject
+
+ # find any subject s where the predicate is rdf:subject and this uri is the object
+ # (these subjects are of type rdf:Statement, given the domain of rdf:subject)
+ for s, p, o in self.triples((None, RDF.subject, uri)):
+ # find all triples with s as the subject and add these to the subgraph
+ for s2, p2, o2 in self.triples((s, None, None)):
+ subgraph.add((s2, p2, o2))
+
+ add_to_cbd(resource)
+
+ return subgraph
+
class ConjunctiveGraph(Graph):
"""A ConjunctiveGraph is an (unnamed) aggregation of all the named
| diff --git a/test/test_graph_cbd.py b/test/test_graph_cbd.py
new file mode 100644
index 000000000..9d02f7c50
--- /dev/null
+++ b/test/test_graph_cbd.py
@@ -0,0 +1,111 @@
+import pytest
+from rdflib import Graph, Namespace
+
+
+"""Tests the Graph class' cbd() function"""
+
+EX = Namespace("http://ex/")
+
+
+@pytest.fixture
+def get_graph():
+ g = Graph()
+ # adding example data for testing
+ g.parse(
+ data="""
+ PREFIX ex: <http://ex/>
+ PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+
+ ex:R1
+ a rdf:Resource ;
+ ex:hasChild ex:R2 , ex:R3 .
+
+ ex:R2
+ ex:propOne ex:P1 ;
+ ex:propTwo ex:P2 .
+
+ ex:R3
+ ex:propOne ex:P3 ;
+ ex:propTwo ex:P4 ;
+ ex:propThree [
+ a rdf:Resource ;
+ ex:propFour "Some Literal" ;
+ ex:propFive ex:P5 ;
+ ex:propSix [
+ ex:propSeven ex:P7 ;
+ ] ;
+ ] .
+ """,
+ format="turtle",
+ )
+
+ g.bind("ex", EX)
+ yield g
+ g.close()
+
+
+def testCbd(get_graph):
+ g = get_graph
+ assert len(g.cbd(EX.R1)) == 3, "cbd() for R1 should return 3 triples"
+
+ assert len(g.cbd(EX.R2)) == 2, "cbd() for R3 should return 2 triples"
+
+ assert len(g.cbd(EX.R3)) == 8, "cbd() for R3 should return 8 triples"
+
+ assert len(g.cbd(EX.R4)) == 0, "cbd() for R4 should return 0 triples"
+
+
+def testCbdReified(get_graph):
+ g = get_graph
+ # add some reified triples to the testing graph
+ g.parse(
+ data="""
+ PREFIX ex: <http://ex/>
+ PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+
+ ex:R5
+ ex:propOne ex:P1 ;
+ ex:propTwo ex:P2 ;
+ ex:propRei ex:Pre1 .
+
+ ex:S
+ a rdf:Statement ;
+ rdf:subject ex:R5 ;
+ rdf:predicate ex:propRei ;
+ rdf:object ex:Pre1 ;
+ ex:otherReiProp ex:Pre2 .
+ """,
+ format="turtle",
+ )
+
+ # this cbd() call should get the 3 basic triples with ex:R5 as subject as well as 5 more from the reified
+ # statement
+ assert len(g.cbd(EX.R5)) == (3 + 5), "cbd() for R5 should return 8 triples"
+
+ # add crazy reified triples to the testing graph
+ g.parse(
+ data="""
+ PREFIX ex: <http://ex/>
+ PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+ ex:R6
+ ex:propOne ex:P1 ;
+ ex:propTwo ex:P2 ;
+ ex:propRei ex:Pre1 .
+ ex:S1
+ a rdf:Statement ;
+ rdf:subject ex:R6 ;
+ rdf:predicate ex:propRei ;
+ rdf:object ex:Pre1 ;
+ ex:otherReiProp ex:Pre3 .
+
+ ex:S2
+ rdf:subject ex:R6 ;
+ rdf:predicate ex:propRei2 ;
+ rdf:object ex:Pre2 ;
+ ex:otherReiProp ex:Pre4 ;
+ ex:otherReiProp ex:Pre5 .
+ """,
+ format="turtle",
+ )
+
+ assert len(g.cbd(EX.R6)) == (3 + 5 + 5), "cbd() for R6 should return 12 triples"
| [
{
"components": [
{
"doc": "Retrieves the Concise Bounded Description of a Resource from a Graph\n\nConcise Bounded Description (CBD) is defined in [1] as:\n\nGiven a particular node (the starting node) in a particular RDF graph (the source graph), a subgraph of that\nparticular graph, taken to co... | [
"test/test_graph_cbd.py::testCbd",
"test/test_graph_cbd.py::testCbdReified"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Re-make of nicholascar's “Concise Bounded Description” PR #968 ...
which, depite the Github record [1] _didn't_ actually make it into master.
[1] https://github.com/RDFLib/rdflib/pull/968/commits/a7863d25dc47730f4ef8bdca91c4e35c7f7b1ccf
## Proposed Changes
Adds `Graph.cbd()` method
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rdflib/graph.py]
(definition of Graph.cbd:)
def cbd(self, resource):
"""Retrieves the Concise Bounded Description of a Resource from a Graph
Concise Bounded Description (CBD) is defined in [1] as:
Given a particular node (the starting node) in a particular RDF graph (the source graph), a subgraph of that
particular graph, taken to comprise a concise bounded description of the resource denoted by the starting node,
can be identified as follows:
1. Include in the subgraph all statements in the source graph where the subject of the statement is the
starting node;
2. Recursively, for all statements identified in the subgraph thus far having a blank node object, include
in the subgraph all statements in the source graph where the subject of the statement is the blank node
in question and which are not already included in the subgraph.
3. Recursively, for all statements included in the subgraph thus far, for all reifications of each statement
in the source graph, include the concise bounded description beginning from the rdf:Statement node of
each reification.
This results in a subgraph where the object nodes are either URI references, literals, or blank nodes not
serving as the subject of any statement in the graph.
[1] https://www.w3.org/Submission/CBD/
:param resource: a URIRef object, of the Resource for queried for
:return: a Graph, subgraph of self"""
(definition of Graph.cbd.add_to_cbd:)
def add_to_cbd(uri):
[end of new definitions in rdflib/graph.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 0c11debb5178157baeac27b735e49a757916d2a6 | ||
matplotlib__matplotlib-21935 | 21,935 | matplotlib/matplotlib | 3.5 | 1535cdc3500762088e53178efc22956365dcdac7 | 2021-12-12T22:40:31Z | diff --git a/lib/matplotlib/artist.py b/lib/matplotlib/artist.py
index 20172a286503..a0888fa4ddda 100644
--- a/lib/matplotlib/artist.py
+++ b/lib/matplotlib/artist.py
@@ -320,23 +320,6 @@ def get_window_extent(self, renderer):
"""
return Bbox([[0, 0], [0, 0]])
- def _get_clipping_extent_bbox(self):
- """
- Return a bbox with the extents of the intersection of the clip_path
- and clip_box for this artist, or None if both of these are
- None, or ``get_clip_on`` is False.
- """
- bbox = None
- if self.get_clip_on():
- clip_box = self.get_clip_box()
- if clip_box is not None:
- bbox = clip_box
- clip_path = self.get_clip_path()
- if clip_path is not None and bbox is not None:
- clip_path = clip_path.get_fully_transformed_path()
- bbox = Bbox.intersection(bbox, clip_path.get_extents())
- return bbox
-
def get_tightbbox(self, renderer):
"""
Like `.Artist.get_window_extent`, but includes any clipping.
@@ -358,7 +341,7 @@ def get_tightbbox(self, renderer):
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = self.get_clip_path()
- if clip_path is not None and bbox is not None:
+ if clip_path is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox, clip_path.get_extents())
return bbox
@@ -844,6 +827,30 @@ def get_in_layout(self):
"""
return self._in_layout
+ def _fully_clipped_to_axes(self):
+ """
+ Return a boolean flag, ``True`` if the artist is clipped to the axes
+ and can thus be skipped in layout calculations. Requires `get_clip_on`
+ is True, one of `clip_box` or `clip_path` is set, ``clip_box.extents``
+ is equivalent to ``ax.bbox.extents`` (if set), and ``clip_path._patch``
+ is equivalent to ``ax.patch`` (if set).
+ """
+ # Note that ``clip_path.get_fully_transformed_path().get_extents()``
+ # cannot be directly compared to ``axes.bbox.extents`` because the
+ # extents may be undefined (i.e. equivalent to ``Bbox.null()``)
+ # before the associated artist is drawn, and this method is meant
+ # to determine whether ``axes.get_tightbbox()`` may bypass drawing
+ clip_box = self.get_clip_box()
+ clip_path = self.get_clip_path()
+ return (self.axes is not None
+ and self.get_clip_on()
+ and (clip_box is not None or clip_path is not None)
+ and (clip_box is None
+ or np.all(clip_box.extents == self.axes.bbox.extents))
+ and (clip_path is None
+ or isinstance(clip_path, TransformedPatchPath)
+ and clip_path._patch is self.axes.patch))
+
def get_clip_on(self):
"""Return whether the artist uses clipping."""
return self._clipon
diff --git a/lib/matplotlib/axes/_base.py b/lib/matplotlib/axes/_base.py
index 21d6f5ac552b..57cf86f47c4a 100644
--- a/lib/matplotlib/axes/_base.py
+++ b/lib/matplotlib/axes/_base.py
@@ -12,7 +12,7 @@
import numpy as np
import matplotlib as mpl
-from matplotlib import _api, cbook, docstring
+from matplotlib import _api, cbook, docstring, offsetbox
import matplotlib.artist as martist
import matplotlib.axis as maxis
from matplotlib.cbook import _OrderedSet, _check_1d, index_of
@@ -4541,21 +4541,26 @@ def get_default_bbox_extra_artists(self):
artists = self.get_children()
+ for _axis in self._get_axis_list():
+ # axis tight bboxes are calculated separately inside
+ # Axes.get_tightbbox() using for_layout_only=True
+ artists.remove(_axis)
if not (self.axison and self._frameon):
# don't do bbox on spines if frame not on.
for spine in self.spines.values():
artists.remove(spine)
- if not self.axison:
- for _axis in self._get_axis_list():
- artists.remove(_axis)
-
artists.remove(self.title)
artists.remove(self._left_title)
artists.remove(self._right_title)
- return [artist for artist in artists
- if (artist.get_visible() and artist.get_in_layout())]
+ # always include types that do not internally implement clipping
+ # to axes. may have clip_on set to True and clip_box equivalent
+ # to ax.bbox but then ignore these properties during draws.
+ noclip = (_AxesBase, maxis.Axis,
+ offsetbox.AnnotationBbox, offsetbox.OffsetBox)
+ return [a for a in artists if a.get_visible() and a.get_in_layout()
+ and (isinstance(a, noclip) or not a._fully_clipped_to_axes())]
def get_tightbbox(self, renderer, call_axes_locator=True,
bbox_extra_artists=None, *, for_layout_only=False):
@@ -4612,17 +4617,11 @@ def get_tightbbox(self, renderer, call_axes_locator=True,
else:
self.apply_aspect()
- if self.axison:
- if self.xaxis.get_visible():
- bb_xaxis = martist._get_tightbbox_for_layout_only(
- self.xaxis, renderer)
- if bb_xaxis:
- bb.append(bb_xaxis)
- if self.yaxis.get_visible():
- bb_yaxis = martist._get_tightbbox_for_layout_only(
- self.yaxis, renderer)
- if bb_yaxis:
- bb.append(bb_yaxis)
+ for axis in self._get_axis_list():
+ if self.axison and axis.get_visible():
+ ba = martist._get_tightbbox_for_layout_only(axis, renderer)
+ if ba:
+ bb.append(ba)
self._update_title_position(renderer)
axbbox = self.get_window_extent(renderer)
bb.append(axbbox)
@@ -4643,17 +4642,6 @@ def get_tightbbox(self, renderer, call_axes_locator=True,
bbox_artists = self.get_default_bbox_extra_artists()
for a in bbox_artists:
- # Extra check here to quickly see if clipping is on and
- # contained in the Axes. If it is, don't get the tightbbox for
- # this artist because this can be expensive:
- clip_extent = a._get_clipping_extent_bbox()
- if clip_extent is not None:
- clip_extent = mtransforms.Bbox.intersection(
- clip_extent, axbbox)
- if np.all(clip_extent.extents == axbbox.extents):
- # clip extent is inside the Axes bbox so don't check
- # this artist
- continue
bbox = a.get_tightbbox(renderer)
if (bbox is not None
and 0 < bbox.width < np.inf
| diff --git a/lib/matplotlib/tests/test_tightlayout.py b/lib/matplotlib/tests/test_tightlayout.py
index e9b01b160da1..43ebd535be2b 100644
--- a/lib/matplotlib/tests/test_tightlayout.py
+++ b/lib/matplotlib/tests/test_tightlayout.py
@@ -342,3 +342,27 @@ def test_manual_colorbar():
fig.colorbar(pts, cax=cax)
with pytest.warns(UserWarning, match="This figure includes Axes"):
fig.tight_layout()
+
+
+def test_clipped_to_axes():
+ # Ensure that _fully_clipped_to_axes() returns True under default
+ # conditions for all projection types. Axes.get_tightbbox()
+ # uses this to skip artists in layout calculations.
+ arr = np.arange(100).reshape((10, 10))
+ fig = plt.figure(figsize=(6, 2))
+ ax1 = fig.add_subplot(131, projection='rectilinear')
+ ax2 = fig.add_subplot(132, projection='mollweide')
+ ax3 = fig.add_subplot(133, projection='polar')
+ for ax in (ax1, ax2, ax3):
+ # Default conditions (clipped by ax.bbox or ax.patch)
+ ax.grid(False)
+ h, = ax.plot(arr[:, 0])
+ m = ax.pcolor(arr)
+ assert h._fully_clipped_to_axes()
+ assert m._fully_clipped_to_axes()
+ # Non-default conditions (not clipped by ax.patch)
+ rect = Rectangle((0, 0), 0.5, 0.5, transform=ax.transAxes)
+ h.set_clip_path(rect)
+ m.set_clip_path(rect.get_path(), rect.get_transform())
+ assert not h._fully_clipped_to_axes()
+ assert not m._fully_clipped_to_axes()
| [
{
"components": [
{
"doc": "Return a boolean flag, ``True`` if the artist is clipped to the axes\nand can thus be skipped in layout calculations. Requires `get_clip_on`\nis True, one of `clip_box` or `clip_path` is set, ``clip_box.extents``\nis equivalent to ``ax.bbox.extents`` (if set), and ``cli... | [
"lib/matplotlib/tests/test_tightlayout.py::test_clipped_to_axes"
] | [
"lib/matplotlib/tests/test_tightlayout.py::test_tight_layout1[png]",
"lib/matplotlib/tests/test_tightlayout.py::test_tight_layout2[png]",
"lib/matplotlib/tests/test_tightlayout.py::test_tight_layout3[png]",
"lib/matplotlib/tests/test_tightlayout.py::test_tight_layout4[png]",
"lib/matplotlib/tests/test_tight... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Significantly improve tight layout performance for cartopy axes
This PR is adapted from SciTools/cartopy#1956 following our discussion there.
## Summary
This PR significantly improves the speed of `Axes.get_tightbbox()` for non-rectilinear axes (in particular, ~~matplotlib's PolarAxes and cartopy's GeoAxes~~ cartopy `GeoAxes` with transformed coordinates) by preventing unnecessary and expensive `get_window_extent()` computations. In the presence of complex, high-resolution artists, it can result in a **2x improvement** to the draw time for cartopy `GeoAxes` when "tight layout" is enabled.
There may be a better way to implement this -- looking forward to everyone's thoughts.
## Details
To prevent unnecessary and expensive `get_window_extent()` computations, `Axes.get_tightbbox()` skips artists with `clip_on` set to True and whose `clip_box.extents` are equivalent to `ax.bbox.extents`. However, while all axes artists are clipped by `TransformedPatchPath(ax.patch)` by default, only artists drawn inside rectilinear projections are clipped by `ax.bbox` (see the below example).
This PR replaces `Artist._get_clipping_extent_bbox()` with `Artist._is_axes_clipped()`. Now,`Axes.get_tightbbox()` includes `ax.patch.get_window_extent()` in its computation, and skips all artists clipped by **either** `ax.bbox` or `ax.patch` (i.e., artists for which `Artist._is_axes_clipped()` returns True).
This PR also removes the computation of the intersection of `clip_box` and `clip_path`, under the assumption that the independent test of `clip_path` covers those instances, but perhaps that should be added back.
## Example
Here is an example with a cartopy `GeoAxes`:
```python
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
fig, ax = plt.subplots(subplot_kw={'projection': ccrs.Robinson()})
N = 5000 # large dataset
lon = np.linspace(-180, 180, N)
lat = np.linspace(-90, 90, N)
data = np.random.rand(N, N)
m = ax.pcolormesh(lon, lat, data, transform=ccrs.PlateCarree())
print(m.get_clip_box() is None) # returns True
%timeit fig.tight_layout()
```
Performance before:
```
638 ms ± 67.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
Performance after:
```
2.37 ms ± 60.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
```
And of course the performance difference is larger the larger the dataset.
## Checklist
<!-- Please mark any checkboxes that do not apply to this PR as [N/A]. -->
**Tests and Styling**
- [x] Has pytest style unit tests (and `pytest` passes).
- [x] Is [Flake 8](https://flake8.pycqa.org/en/latest/) compliant (install `flake8-docstrings` and run `flake8 --docstring-convention=all`).
**Documentation**
- [x] (n/a?) New features are documented, with examples if plot related.
- [x] (n/a?) New features have an entry in `doc/users/next_whats_new/` (follow instructions in README.rst there).
- [x] (n/a?) API changes documented in `doc/api/next_api_changes/` (follow instructions in README.rst there).
- [x] Documentation is sphinx and numpydoc compliant (the docs should [build](https://matplotlib.org/devel/documenting_mpl.html#building-the-docs) without error).
<!--
Thank you so much for your PR! To help us review your contribution, please
consider the following points:
- A development guide is available at https://matplotlib.org/devdocs/devel/index.html.
- Help with git and github is available at
https://matplotlib.org/devel/gitwash/development_workflow.html.
- Do not create the PR out of main, but out of a separate branch.
- The PR title should summarize the changes, for example "Raise ValueError on
non-numeric input to set_xlim". Avoid non-descriptive titles such as
"Addresses issue #8576".
- The summary should provide at least 1-2 sentences describing the pull request
in detail (Why is this change required? What problem does it solve?) and
link to any relevant issues.
- If you are contributing fixes to docstrings, please pay attention to
http://matplotlib.org/devel/documenting_mpl.html#formatting. In particular,
note the difference between using single backquotes, double backquotes, and
asterisks in the markup.
We understand that PRs can sometimes be overwhelming, especially as the
reviews start coming in. Please let us know if the reviews are unclear or
the recommended next step seems overly demanding, if you would like help in
addressing a reviewer's comments, or if you have been waiting too long to hear
back on your PR.
-->
----------
Added a simple test that `artist._is_axes_clipped()` returns `True` under default conditions / various projections. Note all non-rectilinear axes (e.g. `PolarAxes`, `MollweideAxes`, cartopy `GeoAxes`) have `artist.get_clip_box()` set to `None`.
I can also see the origin of this bug: The `Artist._get_clipping_extent_bbox()` lines that I removed (previously used inside `Axes.get_tightbbox()` to skip artists) mirror the below lines in `Artist.get_tightbbox()`.
https://github.com/matplotlib/matplotlib/blob/1a74793837286b588789441c6285c174f643a5ea/lib/matplotlib/artist.py#L340-L364
While `Artist.get_tightbbox()` starts the `if` block with a valid `bbox`, `Artist._get_clipping_extent_bbox()` starts with `bbox = None`... so when it gets to Line 361, the `clip_path` is ignored, because there is no `bbox` to intersect with.
I've improved this PR in a force push:
* Renamed `_is_axes_clipped()` to `_fully_clipped_to_axes` (thanks @tacaswell).
* Added test lines that assert `_fully_clipped_to_axes()` is False for non-default clipping situations (re: @jklymak).
* Removed the redundant addition of `ax.patch` to the bboxes (it's included in `get_default_bbox_extra_artists()`).
* Condensed multiple return statements into a single boolean return statement (possibly cleaner?)
* Moved the line that skips artists from `Axes.get_tightbbox()` to `Axes.get_default_bbox_extra_artists()`. Note this means the public `get_default_bbox_extra_artists()` will return different values compared to previous versions.
I also made the following additional changes:
* Removed an unnecessary `bbox is not None` check in `Artist.get_tightbbox()` (this is always True).
* Removed axis instances from `get_default_bbox_extra_artists()` (was redundant -- see `Axes.get_tightbbox()`).
* Replaced `Axes.get_tightbbox()` reference to `[xy]axis` with `ax._get_axis_list()` (consistent with removed lines).
* Always include artists that don't internally implement clipping (should fix obscure tight layout "bugs").
The latter points also resolve a second inefficiency: Currently, `Axes.get_tightbbox()` *both* 1) computes the `xaxis` and `yaxis` tight bbox using `for_layout_only=True` on these lines:
https://github.com/matplotlib/matplotlib/blob/710fce3df95e22701bd68bf6af2c8adbc9d67a79/lib/matplotlib/axes/_base.py#L4616-L4634
and 2) includes `xaxis` and `yaxis` as "extra artists" in `get_default_bbox_extra_artists()`. This meant that their bounding boxes would be effectively calculated twice if they were not "skipped" by the `_get_clipping_extent_bbox` check. For some reason, this check *was* successfully skipping `xaxis` and `yaxis` in rectilinear axes, because they have a `clip_on` set to `True` and `clip_box` set to `ax.bbox` (despite the fact that they are *not* clipped when drawn..... weird). However, this failed to skip non-rectilinear axis instances, because they only have a `clip_path` set to `ax.patch`. After this PR, axis instances are excluded from `Axes.get_default_bbox_extra_artists()`, and their extents are always calculated once.
I've also verified that, as before, `ax.patch` and `ax.spines` are still included as default "extra artists". Here's a test I used to determine the default clipping settings for various axes components:
```python
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
fig = plt.figure(figsize=(4, 2))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132, projection='polar')
ax3 = fig.add_subplot(133, projection='hammer')
for ax in (ax1, ax2, ax3):
print(ax)
for a in (ax.patch, tuple(ax.spines.values())[0]):
clip_box = a.get_clip_box()
b1 = clip_box is not None and np.all(clip_box.extents == ax.bbox.extents)
clip_path = a.get_clip_path()
b2 = clip_path is not None and clip_path._patch is ax.patch
print(a, '\nclip on?', a.get_clip_on(), '\naxes clip box?', b1, '\naxes clip path?', b2, '\nFULLY CLIPPED?', a._fully_clipped_to_axes())
print()
```
Results:
```python
AxesSubplot(0.125,0.11;0.227941x0.77)
Rectangle(xy=(0, 0), width=1, height=1, angle=0)
clip on? True
axes clip box? False
axes clip path? False
FULLY CLIPPED? False
Spine
clip on? True
axes clip box? False
axes clip path? False
FULLY CLIPPED? False
PolarAxesSubplot(0.398529,0.11;0.227941x0.77)
Wedge(center=(0.5, 0.5), r=0.5, theta1=0, theta2=360, width=None)
clip on? True
axes clip box? False
axes clip path? False
FULLY CLIPPED? False
Spine
clip on? True
axes clip box? False
axes clip path? False
FULLY CLIPPED? False
HammerAxesSubplot(0.672059,0.11;0.227941x0.77)
Circle(xy=(0.5, 0.5), radius=0.5)
clip on? True
axes clip box? False
axes clip path? False
FULLY CLIPPED? False
Spine
clip on? True
axes clip box? False
axes clip path? False
FULLY CLIPPED? False
```
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in lib/matplotlib/artist.py]
(definition of Artist._fully_clipped_to_axes:)
def _fully_clipped_to_axes(self):
"""Return a boolean flag, ``True`` if the artist is clipped to the axes
and can thus be skipped in layout calculations. Requires `get_clip_on`
is True, one of `clip_box` or `clip_path` is set, ``clip_box.extents``
is equivalent to ``ax.bbox.extents`` (if set), and ``clip_path._patch``
is equivalent to ``ax.patch`` (if set)."""
[end of new definitions in lib/matplotlib/artist.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3d6c3da884fafae4654df68144391cfe9be6f134 | ||
rytilahti__python-miio-1236 | 1,236 | rytilahti/python-miio | null | 4342e3df10af78a508796ac44ab9f04986e535dd | 2021-12-12T16:47:54Z | diff --git a/miio/integrations/vacuum/roborock/vacuum.py b/miio/integrations/vacuum/roborock/vacuum.py
index 657f2191f..de6e67bf3 100644
--- a/miio/integrations/vacuum/roborock/vacuum.py
+++ b/miio/integrations/vacuum/roborock/vacuum.py
@@ -114,6 +114,15 @@ class MopMode(enum.Enum):
Deep = 301
+class MopIntensity(enum.Enum):
+ """Mop scrub intensity on S7."""
+
+ Close = 200
+ Mild = 201
+ Moderate = 202
+ Intense = 203
+
+
class CarpetCleaningMode(enum.Enum):
"""Type of carpet cleaning/avoidance."""
@@ -835,6 +844,22 @@ def set_mop_mode(self, mop_mode: MopMode):
"""Set mop mode setting."""
return self.send("set_mop_mode", [mop_mode.value])[0] == "ok"
+ @command()
+ def mop_intensity(self) -> MopIntensity:
+ """Get mop scrub intensity setting."""
+ if self.model != ROCKROBO_S7:
+ raise VacuumException("Mop scrub intensity not supported by %s", self.model)
+
+ return MopIntensity(self.send("get_water_box_custom_mode")[0])
+
+ @command(click.argument("mop_intensity", type=EnumType(MopIntensity)))
+ def set_mop_intensity(self, mop_intensity: MopIntensity):
+ """Set mop scrub intensity setting."""
+ if self.model != ROCKROBO_S7:
+ raise VacuumException("Mop scrub intensity not supported by %s", self.model)
+
+ return self.send("set_water_box_custom_mode", [mop_intensity.value])
+
@command()
def child_lock(self) -> bool:
"""Get child lock setting."""
| diff --git a/miio/integrations/vacuum/roborock/tests/test_vacuum.py b/miio/integrations/vacuum/roborock/tests/test_vacuum.py
index d08d8a586..433062b1a 100644
--- a/miio/integrations/vacuum/roborock/tests/test_vacuum.py
+++ b/miio/integrations/vacuum/roborock/tests/test_vacuum.py
@@ -7,7 +7,13 @@
from miio import RoborockVacuum, Vacuum, VacuumStatus
from miio.tests.dummies import DummyDevice
-from ..vacuum import CarpetCleaningMode, MopMode
+from ..vacuum import (
+ ROCKROBO_S7,
+ CarpetCleaningMode,
+ MopIntensity,
+ MopMode,
+ VacuumException,
+)
class DummyVacuum(DummyDevice, RoborockVacuum):
@@ -312,6 +318,16 @@ def test_mop_mode(self):
with patch.object(self.device, "send", return_value=[32453]):
assert self.device.mop_mode() is None
+ def test_mop_intensity_model_check(self):
+ """Test Roborock S7 check when getting mop intensity."""
+ with pytest.raises(VacuumException):
+ self.device.mop_intensity()
+
+ def test_set_mop_intensity_model_check(self):
+ """Test Roborock S7 check when setting mop intensity."""
+ with pytest.raises(VacuumException):
+ self.device.set_mop_intensity(MopIntensity.Intense)
+
def test_deprecated_vacuum(caplog):
with pytest.deprecated_call():
@@ -319,3 +335,28 @@ def test_deprecated_vacuum(caplog):
with pytest.deprecated_call():
from miio.vacuum import ROCKROBO_S6 # noqa: F401
+
+
+class DummyVacuumS7(DummyVacuum):
+ def __init__(self, *args, **kwargs):
+ self._model = ROCKROBO_S7
+
+
+@pytest.fixture(scope="class")
+def dummyvacuums7(request):
+ request.cls.device = DummyVacuumS7()
+
+
+@pytest.mark.usefixtures("dummyvacuums7")
+class TestVacuumS7(TestCase):
+ def test_mop_intensity(self):
+ """Test getting mop intensity."""
+ with patch.object(self.device, "send", return_value=[203]) as mock_method:
+ assert self.device.mop_intensity()
+ mock_method.assert_called_once_with("get_water_box_custom_mode")
+
+ def test_set_mop_intensity(self):
+ """Test setting mop intensity."""
+ with patch.object(self.device, "send", return_value=[203]) as mock_method:
+ assert self.device.set_mop_intensity(MopIntensity.Intense)
+ mock_method.assert_called_once_with("set_water_box_custom_mode", [203])
| [
{
"components": [
{
"doc": "Mop scrub intensity on S7.",
"lines": [
117,
123
],
"name": "MopIntensity",
"signature": "class MopIntensity(enum.Enum):",
"type": "class"
},
{
"doc": "Get mop scrub intensity setting.",
... | [
"miio/integrations/vacuum/roborock/tests/test_vacuum.py::TestVacuum::test_carpet_cleaning_mode",
"miio/integrations/vacuum/roborock/tests/test_vacuum.py::TestVacuum::test_goto",
"miio/integrations/vacuum/roborock/tests/test_vacuum.py::TestVacuum::test_history",
"miio/integrations/vacuum/roborock/tests/test_va... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add Roborock S7 mop scrub intensity
The Roborock S7 has the option to control the scrub intensity with four different settings:
- Close (raised position)
- Mild
- Moderate
- Intense
The way to control these settings is actually identical to the current waterflow code using `self.send("get_water_box_custom_mode")` and `self.send("set_water_box_custom_mode")`. My initial thought was to just add a new `MopIntensity` class since the names of the settings are different and add a case to the existing `waterflow` and `set_waterflow` methods, so if it's an S7 it uses the new `MopIntensity`. However, the method names don't really make sense (which I'm assuming we don't want to change to prevent breaking apps relying on this library) so my thought is the better option is just to duplicate the methods with a new name.
Addresses #1215
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in miio/integrations/vacuum/roborock/vacuum.py]
(definition of MopIntensity:)
class MopIntensity(enum.Enum):
"""Mop scrub intensity on S7."""
(definition of RoborockVacuum.mop_intensity:)
def mop_intensity(self) -> MopIntensity:
"""Get mop scrub intensity setting."""
(definition of RoborockVacuum.set_mop_intensity:)
def set_mop_intensity(self, mop_intensity: MopIntensity):
"""Set mop scrub intensity setting."""
[end of new definitions in miio/integrations/vacuum/roborock/vacuum.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 62427d2f796e603520acca3b57b29ec3e6489bca | ||
conan-io__conan-10154 | 10,154 | conan-io/conan | null | 51a077458b697dcd39e5eb64f1d33a6c52584081 | 2021-12-10T13:11:56Z | diff --git a/conan/tools/files/__init__.py b/conan/tools/files/__init__.py
index d6c2bf10208..7a84ecda977 100644
--- a/conan/tools/files/__init__.py
+++ b/conan/tools/files/__init__.py
@@ -3,3 +3,4 @@
from conan.tools.files.patches import patch, apply_conandata_patches
from conan.tools.files.cpp_package import CppPackage
from conan.tools.files.packager import AutoPackager
+from conan.tools.files.symlinks import symlinks
diff --git a/conan/tools/files/symlinks/__init__.py b/conan/tools/files/symlinks/__init__.py
new file mode 100644
index 00000000000..d826e74d6f8
--- /dev/null
+++ b/conan/tools/files/symlinks/__init__.py
@@ -0,0 +1,2 @@
+from conan.tools.files.symlinks.symlinks import absolute_to_relative_symlinks, \
+ remove_external_symlinks, remove_broken_symlinks, get_symlinks
diff --git a/conan/tools/files/symlinks/symlinks.py b/conan/tools/files/symlinks/symlinks.py
new file mode 100644
index 00000000000..98c4b2040a1
--- /dev/null
+++ b/conan/tools/files/symlinks/symlinks.py
@@ -0,0 +1,52 @@
+import os
+
+
+def get_symlinks(base_folder):
+ """Return the absolute path to the symlink files in base_folder"""
+ for (root, dirnames, filenames) in os.walk(base_folder):
+ for el in filenames + dirnames:
+ fullpath = os.path.join(root, el)
+ if os.path.islink(fullpath):
+ yield fullpath
+
+
+def _path_inside(base, folder):
+ base = os.path.abspath(base)
+ folder = os.path.abspath(folder)
+ return os.path.commonprefix([base, folder]) == base
+
+
+def absolute_to_relative_symlinks(conanfile, base_folder):
+ """Convert the symlinks with absolute paths to relative if they are pointing to a file or
+ directory inside the 'base_folder'. Any absolute symlink pointing outside the 'base_folder'
+ will be ignored"""
+ for fullpath in get_symlinks(base_folder):
+ link_target = os.readlink(fullpath)
+ if not os.path.isabs(link_target):
+ continue
+ folder_of_symlink = os.path.dirname(fullpath)
+ if _path_inside(base_folder, link_target):
+ os.unlink(fullpath)
+ new_link = os.path.relpath(link_target, folder_of_symlink)
+ os.symlink(new_link, fullpath)
+
+
+def remove_external_symlinks(conanfile, base_folder=None):
+ """Remove the symlinks to files that point outside the 'base_folder', no matter if relative or
+ absolute"""
+ for fullpath in get_symlinks(base_folder):
+ link_target = os.readlink(fullpath)
+ if not os.path.isabs(link_target):
+ link_target = os.path.join(base_folder, link_target)
+ if not _path_inside(base_folder, link_target):
+ os.unlink(fullpath)
+
+
+def remove_broken_symlinks(conanfile, base_folder=None):
+ """Remove the broken symlinks, no matter if relative or absolute"""
+ for fullpath in get_symlinks(base_folder):
+ link_target = os.readlink(fullpath)
+ if not os.path.isabs(link_target):
+ link_target = os.path.join(base_folder, link_target)
+ if not os.path.exists(link_target):
+ os.unlink(fullpath)
| diff --git a/conans/test/unittests/tools/files/test_symlinks.py b/conans/test/unittests/tools/files/test_symlinks.py
new file mode 100644
index 00000000000..7b7398a85b3
--- /dev/null
+++ b/conans/test/unittests/tools/files/test_symlinks.py
@@ -0,0 +1,122 @@
+import os
+
+import pytest
+
+from conan import tools
+from conan.tools.files import mkdir
+from conans.test.utils.test_files import temp_folder
+
+
+@pytest.fixture
+def folders():
+ tmp = temp_folder()
+ files = ["foo/var/file.txt"]
+ outside_folder = temp_folder()
+ symlinks = [
+ (os.path.join(tmp, "foo/var/file.txt"), "foo/var/other/absolute.txt"), # Absolute link
+ (os.path.join(tmp, "foo/var"), "foo/var/other/other/myfolder"), # Absolute link folder
+ (os.path.join(tmp, "foo/var/file.txt"), "foo/absolute.txt"), # Absolute link
+ ("foo/var/file.txt", "foo/var/other/relative.txt"), # Relative link
+ ("missing.txt", "foo/var/other/broken.txt"), # Broken link
+ (outside_folder, "foo/var/other/absolute_outside"), # Absolute folder outside the folder
+ ("../../../../../outside", "foo/absolute_outside"), # Relative folder outside the folder
+ ]
+ # Create the files and symlinks
+ for path in files:
+ mkdir(None, os.path.dirname(os.path.join(tmp, path)))
+ with open(os.path.join(tmp, path), "w") as fl:
+ fl.write("foo")
+
+ for link_dst, linked_file in symlinks:
+ mkdir(None, os.path.dirname(os.path.join(tmp, linked_file)))
+ os.symlink(link_dst, os.path.join(tmp, linked_file))
+ return tmp, outside_folder
+
+
+def test_absolute_to_relative_symlinks(folders):
+ """If a symlink is absolute but relative to a file or folder that is contained in
+ the base folder, we can make it relative"""
+
+ folder, outside_folder = folders
+ # Transform the absolute symlinks to relative
+ tools.files.symlinks.absolute_to_relative_symlinks(None, folder)
+
+ # Check the results
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/absolute.txt")).replace("\\", "/")
+ assert linked_to == "../file.txt"
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/other/myfolder")).replace("\\", "/")
+ assert linked_to == "../.."
+
+ linked_to = os.readlink(os.path.join(folder, "foo/absolute.txt")).replace("\\", "/")
+ assert linked_to == "var/file.txt"
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/relative.txt")).replace("\\", "/")
+ assert linked_to == "foo/var/file.txt"
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/broken.txt"))
+ assert linked_to == "missing.txt"
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/absolute_outside"))
+ assert linked_to == outside_folder
+
+
+def test_remove_external_symlinks(folders):
+
+ folder, outside_folder = folders
+ # Remove the external symlinks
+ tools.files.symlinks.remove_external_symlinks(None, folder)
+
+ # Check the results, these are kept the same
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/absolute.txt"))
+ assert linked_to == os.path.join(folder, "foo/var/file.txt")
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/other/myfolder"))
+ assert linked_to == os.path.join(folder, "foo/var")
+
+ linked_to = os.readlink(os.path.join(folder, "foo/absolute.txt"))
+ assert linked_to == os.path.join(folder, "foo/var/file.txt")
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/relative.txt"))
+ assert linked_to == "foo/var/file.txt"
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/broken.txt"))
+ assert linked_to == "missing.txt"
+
+ # This one is removed
+ assert not os.path.islink(os.path.join(folder, "foo/var/other/absolute_outside"))
+ assert not os.path.exists(os.path.join(folder, "foo/var/other/absolute_outside"))
+
+ # This one is removed
+ assert not os.path.islink(os.path.join(folder, "foo/absolute_outside"))
+ assert not os.path.exists(os.path.join(folder, "foo/absolute_outside"))
+
+
+def test_remove_broken_symlinks(folders):
+ folder, outside_folder = folders
+ # Remove the external symlinks
+ tools.files.symlinks.remove_broken_symlinks(None, folder)
+
+ # Check the results, these are kept the same
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/absolute.txt"))
+ assert linked_to == os.path.join(folder, "foo/var/file.txt")
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/other/myfolder"))
+ assert linked_to == os.path.join(folder, "foo/var")
+
+ linked_to = os.readlink(os.path.join(folder, "foo/absolute.txt"))
+ assert linked_to == os.path.join(folder, "foo/var/file.txt")
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/relative.txt"))
+ assert linked_to == "foo/var/file.txt"
+
+ # This one is removed
+ assert not os.path.islink(os.path.join(folder, "foo/var/other/broken.txt"))
+ assert not os.path.exists(os.path.join(folder, "foo/var/other/broken.txt"))
+
+ linked_to = os.readlink(os.path.join(folder, "foo/var/other/absolute_outside"))
+ assert linked_to == outside_folder
+
+ # This is broken also so it is also removed
+ assert not os.path.islink(os.path.join(folder, "foo/absolute_outside"))
+ assert not os.path.exists(os.path.join(folder, "foo/absolute_outside"))
| [
{
"components": [
{
"doc": "Return the absolute path to the symlink files in base_folder",
"lines": [
4,
10
],
"name": "get_symlinks",
"signature": "def get_symlinks(base_folder):",
"type": "function"
},
{
"doc": "",
... | [
"conans/test/unittests/tools/files/test_symlinks.py::test_absolute_to_relative_symlinks",
"conans/test/unittests/tools/files/test_symlinks.py::test_remove_external_symlinks",
"conans/test/unittests/tools/files/test_symlinks.py::test_remove_broken_symlinks"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Ported symlinks tools from #10125
Changelog: Feature: Provided several `conan.tools.files` functions to manage symlinks: Transform absolute to relative symlinks, remove broken symlinks, remove external symlinks and get the symlinks in a folder. These tools will help migrate to Conan 2.0 where the package files won't be automatically cleaned from broken absolute symlinks or external symlinks.
Docs: https://github.com/conan-io/docs/pull/2343
Pending merge of https://github.com/conan-io/conan/pull/10125
PENDING DOCUMENTATION:
- Document tools
- Document the migration guide
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in conan/tools/files/symlinks/symlinks.py]
(definition of get_symlinks:)
def get_symlinks(base_folder):
"""Return the absolute path to the symlink files in base_folder"""
(definition of _path_inside:)
def _path_inside(base, folder):
(definition of absolute_to_relative_symlinks:)
def absolute_to_relative_symlinks(conanfile, base_folder):
"""Convert the symlinks with absolute paths to relative if they are pointing to a file or
directory inside the 'base_folder'. Any absolute symlink pointing outside the 'base_folder'
will be ignored"""
(definition of remove_external_symlinks:)
def remove_external_symlinks(conanfile, base_folder=None):
"""Remove the symlinks to files that point outside the 'base_folder', no matter if relative or
absolute"""
(definition of remove_broken_symlinks:)
def remove_broken_symlinks(conanfile, base_folder=None):
"""Remove the broken symlinks, no matter if relative or absolute"""
[end of new definitions in conan/tools/files/symlinks/symlinks.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 4a5b19a75db9225316c8cb022a2dfb9705a2af34 | ||
pydata__xarray-6059 | 6,059 | pydata/xarray | 0.20 | 8f42bfd3a5fd0b1a351b535be207ed4771b02c8b | 2021-12-10T01:11:36Z | diff --git a/doc/api.rst b/doc/api.rst
index d2c222da4db..7fdd775e168 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -944,6 +944,7 @@ Dataset
DatasetWeighted
DatasetWeighted.mean
+ DatasetWeighted.quantile
DatasetWeighted.sum
DatasetWeighted.std
DatasetWeighted.var
@@ -958,6 +959,7 @@ DataArray
DataArrayWeighted
DataArrayWeighted.mean
+ DataArrayWeighted.quantile
DataArrayWeighted.sum
DataArrayWeighted.std
DataArrayWeighted.var
diff --git a/doc/user-guide/computation.rst b/doc/user-guide/computation.rst
index de2afa9060c..dc9748af80b 100644
--- a/doc/user-guide/computation.rst
+++ b/doc/user-guide/computation.rst
@@ -265,7 +265,7 @@ Weighted array reductions
:py:class:`DataArray` and :py:class:`Dataset` objects include :py:meth:`DataArray.weighted`
and :py:meth:`Dataset.weighted` array reduction methods. They currently
-support weighted ``sum``, ``mean``, ``std`` and ``var``.
+support weighted ``sum``, ``mean``, ``std``, ``var`` and ``quantile``.
.. ipython:: python
@@ -293,6 +293,12 @@ Calculate the weighted mean:
weighted_prec.mean(dim="month")
+Calculate the weighted quantile:
+
+.. ipython:: python
+
+ weighted_prec.quantile(q=0.5, dim="month")
+
The weighted sum corresponds to:
.. ipython:: python
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 37cf3af85b9..a15618e9d1f 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -22,6 +22,9 @@ v2022.03.1 (unreleased)
New Features
~~~~~~~~~~~~
+- Add a weighted ``quantile`` method to :py:class:`~core.weighted.DatasetWeighted` and
+ :py:class:`~core.weighted.DataArrayWeighted` (:pull:`6059`). By
+ `Christian Jauvin <https://github.com/cjauvin>`_ and `David Huard <https://github.com/huard>`_.
- Add a ``create_index=True`` parameter to :py:meth:`Dataset.stack` and
:py:meth:`DataArray.stack` so that the creation of multi-indexes is optional
(:pull:`5692`). By `Benoît Bovy <https://github.com/benbovy>`_.
diff --git a/xarray/core/weighted.py b/xarray/core/weighted.py
index 83ce36bcb35..2e944eab1e0 100644
--- a/xarray/core/weighted.py
+++ b/xarray/core/weighted.py
@@ -1,14 +1,26 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Generic, Hashable, Iterable, cast
+from typing import TYPE_CHECKING, Generic, Hashable, Iterable, Literal, Sequence, cast
import numpy as np
-from . import duck_array_ops
-from .computation import dot
+from . import duck_array_ops, utils
+from .alignment import align, broadcast
+from .computation import apply_ufunc, dot
+from .npcompat import ArrayLike
from .pycompat import is_duck_dask_array
from .types import T_Xarray
+# Weighted quantile methods are a subset of the numpy supported quantile methods.
+QUANTILE_METHODS = Literal[
+ "linear",
+ "interpolated_inverted_cdf",
+ "hazen",
+ "weibull",
+ "median_unbiased",
+ "normal_unbiased",
+]
+
_WEIGHTED_REDUCE_DOCSTRING_TEMPLATE = """
Reduce this {cls}'s data by a weighted ``{fcn}`` along some dimension(s).
@@ -56,6 +68,61 @@
New {cls} object with the sum of the weights over the given dimension.
"""
+_WEIGHTED_QUANTILE_DOCSTRING_TEMPLATE = """
+ Apply a weighted ``quantile`` to this {cls}'s data along some dimension(s).
+
+ Weights are interpreted as *sampling weights* (or probability weights) and
+ describe how a sample is scaled to the whole population [1]_. There are
+ other possible interpretations for weights, *precision weights* describing the
+ precision of observations, or *frequency weights* counting the number of identical
+ observations, however, they are not implemented here.
+
+ For compatibility with NumPy's non-weighted ``quantile`` (which is used by
+ ``DataArray.quantile`` and ``Dataset.quantile``), the only interpolation
+ method supported by this weighted version corresponds to the default "linear"
+ option of ``numpy.quantile``. This is "Type 7" option, described in Hyndman
+ and Fan (1996) [2]_. The implementation is largely inspired by a blog post
+ from A. Akinshin's [3]_.
+
+ Parameters
+ ----------
+ q : float or sequence of float
+ Quantile to compute, which must be between 0 and 1 inclusive.
+ dim : str or sequence of str, optional
+ Dimension(s) over which to apply the weighted ``quantile``.
+ skipna : bool, optional
+ If True, skip missing values (as marked by NaN). By default, only
+ skips missing values for float dtypes; other dtypes either do not
+ have a sentinel missing value (int) or skipna=True has not been
+ implemented (object, datetime64 or timedelta64).
+ keep_attrs : bool, optional
+ If True, the attributes (``attrs``) will be copied from the original
+ object to the new one. If False (default), the new object will be
+ returned without attributes.
+
+ Returns
+ -------
+ quantiles : {cls}
+ New {cls} object with weighted ``quantile`` applied to its data and
+ the indicated dimension(s) removed.
+
+ See Also
+ --------
+ numpy.nanquantile, pandas.Series.quantile, Dataset.quantile, DataArray.quantile
+
+ Notes
+ -----
+ Returns NaN if the ``weights`` sum to 0.0 along the reduced
+ dimension(s).
+
+ References
+ ----------
+ .. [1] https://notstatschat.rbind.io/2020/08/04/weights-in-statistics/
+ .. [2] Hyndman, R. J. & Fan, Y. (1996). Sample Quantiles in Statistical Packages.
+ The American Statistician, 50(4), 361–365. https://doi.org/10.2307/2684934
+ .. [3] https://aakinshin.net/posts/weighted-quantiles
+ """
+
if TYPE_CHECKING:
from .dataarray import DataArray
@@ -241,6 +308,141 @@ def _weighted_std(
return cast("DataArray", np.sqrt(self._weighted_var(da, dim, skipna)))
+ def _weighted_quantile(
+ self,
+ da: DataArray,
+ q: ArrayLike,
+ dim: Hashable | Iterable[Hashable] | None = None,
+ skipna: bool = None,
+ ) -> DataArray:
+ """Apply a weighted ``quantile`` to a DataArray along some dimension(s)."""
+
+ def _get_h(n: float, q: np.ndarray, method: QUANTILE_METHODS) -> np.ndarray:
+ """Return the interpolation parameter."""
+ # Note that options are not yet exposed in the public API.
+ if method == "linear":
+ h = (n - 1) * q + 1
+ elif method == "interpolated_inverted_cdf":
+ h = n * q
+ elif method == "hazen":
+ h = n * q + 0.5
+ elif method == "weibull":
+ h = (n + 1) * q
+ elif method == "median_unbiased":
+ h = (n + 1 / 3) * q + 1 / 3
+ elif method == "normal_unbiased":
+ h = (n + 1 / 4) * q + 3 / 8
+ else:
+ raise ValueError(f"Invalid method: {method}.")
+ return h.clip(1, n)
+
+ def _weighted_quantile_1d(
+ data: np.ndarray,
+ weights: np.ndarray,
+ q: np.ndarray,
+ skipna: bool,
+ method: QUANTILE_METHODS = "linear",
+ ) -> np.ndarray:
+
+ # This algorithm has been adapted from:
+ # https://aakinshin.net/posts/weighted-quantiles/#reference-implementation
+ is_nan = np.isnan(data)
+ if skipna:
+ # Remove nans from data and weights
+ not_nan = ~is_nan
+ data = data[not_nan]
+ weights = weights[not_nan]
+ elif is_nan.any():
+ # Return nan if data contains any nan
+ return np.full(q.size, np.nan)
+
+ # Filter out data (and weights) associated with zero weights, which also flattens them
+ nonzero_weights = weights != 0
+ data = data[nonzero_weights]
+ weights = weights[nonzero_weights]
+ n = data.size
+
+ if n == 0:
+ # Possibly empty after nan or zero weight filtering above
+ return np.full(q.size, np.nan)
+
+ # Kish's effective sample size
+ nw = weights.sum() ** 2 / (weights**2).sum()
+
+ # Sort data and weights
+ sorter = np.argsort(data)
+ data = data[sorter]
+ weights = weights[sorter]
+
+ # Normalize and sum the weights
+ weights = weights / weights.sum()
+ weights_cum = np.append(0, weights.cumsum())
+
+ # Vectorize the computation by transposing q with respect to weights
+ q = np.atleast_2d(q).T
+
+ # Get the interpolation parameter for each q
+ h = _get_h(nw, q, method)
+
+ # Find the samples contributing to the quantile computation (at *positions* between (h-1)/nw and h/nw)
+ u = np.maximum((h - 1) / nw, np.minimum(h / nw, weights_cum))
+
+ # Compute their relative weight
+ v = u * nw - h + 1
+ w = np.diff(v)
+
+ # Apply the weights
+ return (data * w).sum(axis=1)
+
+ if skipna is None and da.dtype.kind in "cfO":
+ skipna = True
+
+ q = np.atleast_1d(np.asarray(q, dtype=np.float64))
+
+ if q.ndim > 1:
+ raise ValueError("q must be a scalar or 1d")
+
+ if np.any((q < 0) | (q > 1)):
+ raise ValueError("q values must be between 0 and 1")
+
+ if dim is None:
+ dim = da.dims
+
+ if utils.is_scalar(dim):
+ dim = [dim]
+
+ # To satisfy mypy
+ dim = cast(Sequence, dim)
+
+ # need to align *and* broadcast
+ # - `_weighted_quantile_1d` requires arrays with the same shape
+ # - broadcast does an outer join, which can introduce NaN to weights
+ # - therefore we first need to do align(..., join="inner")
+
+ # TODO: use broadcast(..., join="inner") once available
+ # see https://github.com/pydata/xarray/issues/6304
+
+ da, weights = align(da, self.weights, join="inner")
+ da, weights = broadcast(da, weights)
+
+ result = apply_ufunc(
+ _weighted_quantile_1d,
+ da,
+ weights,
+ input_core_dims=[dim, dim],
+ output_core_dims=[["quantile"]],
+ output_dtypes=[np.float64],
+ dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
+ dask="parallelized",
+ vectorize=True,
+ kwargs={"q": q, "skipna": skipna},
+ )
+
+ result = result.transpose("quantile", ...)
+ result = result.assign_coords(quantile=q).squeeze()
+
+ return result
+
def _implementation(self, func, dim, **kwargs):
raise NotImplementedError("Use `Dataset.weighted` or `DataArray.weighted`")
@@ -310,6 +512,19 @@ def std(
self._weighted_std, dim=dim, skipna=skipna, keep_attrs=keep_attrs
)
+ def quantile(
+ self,
+ q: ArrayLike,
+ *,
+ dim: Hashable | Sequence[Hashable] | None = None,
+ keep_attrs: bool = None,
+ skipna: bool = True,
+ ) -> T_Xarray:
+
+ return self._implementation(
+ self._weighted_quantile, q=q, dim=dim, skipna=skipna, keep_attrs=keep_attrs
+ )
+
def __repr__(self):
"""provide a nice str repr of our Weighted object"""
@@ -360,6 +575,8 @@ def _inject_docstring(cls, cls_name):
cls=cls_name, fcn="std", on_zero="NaN"
)
+ cls.quantile.__doc__ = _WEIGHTED_QUANTILE_DOCSTRING_TEMPLATE.format(cls=cls_name)
+
_inject_docstring(DataArrayWeighted, "DataArray")
_inject_docstring(DatasetWeighted, "Dataset")
| diff --git a/xarray/tests/test_weighted.py b/xarray/tests/test_weighted.py
index 1f065228bc4..63dd1ec0c94 100644
--- a/xarray/tests/test_weighted.py
+++ b/xarray/tests/test_weighted.py
@@ -194,6 +194,160 @@ def test_weighted_mean_no_nan(weights, expected):
assert_equal(expected, result)
+@pytest.mark.parametrize(
+ ("weights", "expected"),
+ (
+ (
+ [0.25, 0.05, 0.15, 0.25, 0.15, 0.1, 0.05],
+ [1.554595, 2.463784, 3.000000, 3.518378],
+ ),
+ (
+ [0.05, 0.05, 0.1, 0.15, 0.15, 0.25, 0.25],
+ [2.840000, 3.632973, 4.076216, 4.523243],
+ ),
+ ),
+)
+def test_weighted_quantile_no_nan(weights, expected):
+ # Expected values were calculated by running the reference implementation
+ # proposed in https://aakinshin.net/posts/weighted-quantiles/
+
+ da = DataArray([1, 1.9, 2.2, 3, 3.7, 4.1, 5])
+ q = [0.2, 0.4, 0.6, 0.8]
+ weights = DataArray(weights)
+
+ expected = DataArray(expected, coords={"quantile": q})
+ result = da.weighted(weights).quantile(q)
+
+ assert_allclose(expected, result)
+
+
+def test_weighted_quantile_zero_weights():
+
+ da = DataArray([0, 1, 2, 3])
+ weights = DataArray([1, 0, 1, 0])
+ q = 0.75
+
+ result = da.weighted(weights).quantile(q)
+ expected = DataArray([0, 2]).quantile(0.75)
+
+ assert_allclose(expected, result)
+
+
+def test_weighted_quantile_simple():
+ # Check that weighted quantiles return the same value as numpy quantiles
+ da = DataArray([0, 1, 2, 3])
+ w = DataArray([1, 0, 1, 0])
+
+ w_eps = DataArray([1, 0.0001, 1, 0.0001])
+ q = 0.75
+
+ expected = DataArray(np.quantile([0, 2], q), coords={"quantile": q}) # 1.5
+
+ assert_equal(expected, da.weighted(w).quantile(q))
+ assert_allclose(expected, da.weighted(w_eps).quantile(q), rtol=0.001)
+
+
+@pytest.mark.parametrize("skipna", (True, False))
+def test_weighted_quantile_nan(skipna):
+ # Check skipna behavior
+ da = DataArray([0, 1, 2, 3, np.nan])
+ w = DataArray([1, 0, 1, 0, 1])
+ q = [0.5, 0.75]
+
+ result = da.weighted(w).quantile(q, skipna=skipna)
+
+ if skipna:
+ expected = DataArray(np.quantile([0, 2], q), coords={"quantile": q})
+ else:
+ expected = DataArray(np.full(len(q), np.nan), coords={"quantile": q})
+
+ assert_allclose(expected, result)
+
+
+@pytest.mark.parametrize(
+ "da",
+ (
+ [1, 1.9, 2.2, 3, 3.7, 4.1, 5],
+ [1, 1.9, 2.2, 3, 3.7, 4.1, np.nan],
+ [np.nan, np.nan, np.nan],
+ ),
+)
+@pytest.mark.parametrize("q", (0.5, (0.2, 0.8)))
+@pytest.mark.parametrize("skipna", (True, False))
+@pytest.mark.parametrize("factor", [1, 3.14])
+def test_weighted_quantile_equal_weights(da, q, skipna, factor):
+ # if all weights are equal (!= 0), should yield the same result as quantile
+
+ da = DataArray(da)
+ weights = xr.full_like(da, factor)
+
+ expected = da.quantile(q, skipna=skipna)
+ result = da.weighted(weights).quantile(q, skipna=skipna)
+
+ assert_allclose(expected, result)
+
+
+@pytest.mark.skip(reason="`method` argument is not currently exposed")
+@pytest.mark.parametrize(
+ "da",
+ (
+ [1, 1.9, 2.2, 3, 3.7, 4.1, 5],
+ [1, 1.9, 2.2, 3, 3.7, 4.1, np.nan],
+ [np.nan, np.nan, np.nan],
+ ),
+)
+@pytest.mark.parametrize("q", (0.5, (0.2, 0.8)))
+@pytest.mark.parametrize("skipna", (True, False))
+@pytest.mark.parametrize(
+ "method",
+ [
+ "linear",
+ "interpolated_inverted_cdf",
+ "hazen",
+ "weibull",
+ "median_unbiased",
+ "normal_unbiased2",
+ ],
+)
+def test_weighted_quantile_equal_weights_all_methods(da, q, skipna, factor, method):
+ # If all weights are equal (!= 0), should yield the same result as numpy quantile
+
+ da = DataArray(da)
+ weights = xr.full_like(da, 3.14)
+
+ expected = da.quantile(q, skipna=skipna, method=method)
+ result = da.weighted(weights).quantile(q, skipna=skipna, method=method)
+
+ assert_allclose(expected, result)
+
+
+def test_weighted_quantile_bool():
+ # https://github.com/pydata/xarray/issues/4074
+ da = DataArray([1, 1])
+ weights = DataArray([True, True])
+ q = 0.5
+
+ expected = DataArray([1], coords={"quantile": [q]}).squeeze()
+ result = da.weighted(weights).quantile(q)
+
+ assert_equal(expected, result)
+
+
+@pytest.mark.parametrize("q", (-1, 1.1, (0.5, 1.1), ((0.2, 0.4), (0.6, 0.8))))
+def test_weighted_quantile_with_invalid_q(q):
+
+ da = DataArray([1, 1.9, 2.2, 3, 3.7, 4.1, 5])
+ q = np.asarray(q)
+ weights = xr.ones_like(da)
+
+ if q.ndim <= 1:
+ with pytest.raises(ValueError, match="q values must be between 0 and 1"):
+ da.weighted(weights).quantile(q)
+ else:
+ with pytest.raises(ValueError, match="q must be a scalar or 1d"):
+ da.weighted(weights).quantile(q)
+
+
@pytest.mark.parametrize(
("weights", "expected"), (([4, 6], 2.0), ([1, 0], np.nan), ([0, 0], np.nan))
)
@@ -466,16 +620,56 @@ def test_weighted_operations_3D(dim, add_nans, skipna):
check_weighted_operations(data, weights, dim, skipna)
-def test_weighted_operations_nonequal_coords():
+@pytest.mark.parametrize("dim", ("a", "b", "c", ("a", "b"), ("a", "b", "c"), None))
+@pytest.mark.parametrize("q", (0.5, (0.1, 0.9), (0.2, 0.4, 0.6, 0.8)))
+@pytest.mark.parametrize("add_nans", (True, False))
+@pytest.mark.parametrize("skipna", (None, True, False))
+def test_weighted_quantile_3D(dim, q, add_nans, skipna):
+
+ dims = ("a", "b", "c")
+ coords = dict(a=[0, 1, 2], b=[0, 1, 2, 3], c=[0, 1, 2, 3, 4])
+ data = np.arange(60).reshape(3, 4, 5).astype(float)
+
+ # add approximately 25 % NaNs (https://stackoverflow.com/a/32182680/3010700)
+ if add_nans:
+ c = int(data.size * 0.25)
+ data.ravel()[np.random.choice(data.size, c, replace=False)] = np.NaN
+
+ da = DataArray(data, dims=dims, coords=coords)
+
+ # Weights are all ones, because we will compare against DataArray.quantile (non-weighted)
+ weights = xr.ones_like(da)
+
+ result = da.weighted(weights).quantile(q, dim=dim, skipna=skipna)
+ expected = da.quantile(q, dim=dim, skipna=skipna)
+
+ assert_allclose(expected, result)
+
+ ds = da.to_dataset(name="data")
+ result2 = ds.weighted(weights).quantile(q, dim=dim, skipna=skipna)
+
+ assert_allclose(expected, result2.data)
+
+
+def test_weighted_operations_nonequal_coords():
+ # There are no weights for a == 4, so that data point is ignored.
weights = DataArray(np.random.randn(4), dims=("a",), coords=dict(a=[0, 1, 2, 3]))
data = DataArray(np.random.randn(4), dims=("a",), coords=dict(a=[1, 2, 3, 4]))
-
check_weighted_operations(data, weights, dim="a", skipna=None)
+ q = 0.5
+ result = data.weighted(weights).quantile(q, dim="a")
+ # Expected value computed using code from https://aakinshin.net/posts/weighted-quantiles/ with values at a=1,2,3
+ expected = DataArray([0.9308707], coords={"quantile": [q]}).squeeze()
+ assert_allclose(result, expected)
+
data = data.to_dataset(name="data")
check_weighted_operations(data, weights, dim="a", skipna=None)
+ result = data.weighted(weights).quantile(q, dim="a")
+ assert_allclose(result, expected.to_dataset(name="data"))
+
@pytest.mark.parametrize("shape_data", ((4,), (4, 4), (4, 4, 4)))
@pytest.mark.parametrize("shape_weights", ((4,), (4, 4), (4, 4, 4)))
@@ -506,7 +700,8 @@ def test_weighted_operations_different_shapes(
@pytest.mark.parametrize(
- "operation", ("sum_of_weights", "sum", "mean", "sum_of_squares", "var", "std")
+ "operation",
+ ("sum_of_weights", "sum", "mean", "sum_of_squares", "var", "std", "quantile"),
)
@pytest.mark.parametrize("as_dataset", (True, False))
@pytest.mark.parametrize("keep_attrs", (True, False, None))
@@ -520,22 +715,23 @@ def test_weighted_operations_keep_attr(operation, as_dataset, keep_attrs):
data.attrs = dict(attr="weights")
- result = getattr(data.weighted(weights), operation)(keep_attrs=True)
+ kwargs = {"keep_attrs": keep_attrs}
+ if operation == "quantile":
+ kwargs["q"] = 0.5
+
+ result = getattr(data.weighted(weights), operation)(**kwargs)
if operation == "sum_of_weights":
- assert weights.attrs == result.attrs
+ assert result.attrs == (weights.attrs if keep_attrs else {})
+ assert result.attrs == (weights.attrs if keep_attrs else {})
else:
- assert data.attrs == result.attrs
-
- result = getattr(data.weighted(weights), operation)(keep_attrs=None)
- assert not result.attrs
-
- result = getattr(data.weighted(weights), operation)(keep_attrs=False)
- assert not result.attrs
+ assert result.attrs == (weights.attrs if keep_attrs else {})
+ assert result.attrs == (data.attrs if keep_attrs else {})
@pytest.mark.parametrize(
- "operation", ("sum_of_weights", "sum", "mean", "sum_of_squares", "var", "std")
+ "operation",
+ ("sum_of_weights", "sum", "mean", "sum_of_squares", "var", "std", "quantile"),
)
def test_weighted_operations_keep_attr_da_in_ds(operation):
# GH #3595
@@ -544,22 +740,31 @@ def test_weighted_operations_keep_attr_da_in_ds(operation):
data = DataArray(np.random.randn(2, 2), attrs=dict(attr="data"))
data = data.to_dataset(name="a")
- result = getattr(data.weighted(weights), operation)(keep_attrs=True)
+ kwargs = {"keep_attrs": True}
+ if operation == "quantile":
+ kwargs["q"] = 0.5
+
+ result = getattr(data.weighted(weights), operation)(**kwargs)
assert data.a.attrs == result.a.attrs
+@pytest.mark.parametrize("operation", ("sum_of_weights", "sum", "mean", "quantile"))
@pytest.mark.parametrize("as_dataset", (True, False))
-def test_weighted_bad_dim(as_dataset):
+def test_weighted_bad_dim(operation, as_dataset):
data = DataArray(np.random.randn(2, 2))
weights = xr.ones_like(data)
if as_dataset:
data = data.to_dataset(name="data")
+ kwargs = {"dim": "bad_dim"}
+ if operation == "quantile":
+ kwargs["q"] = 0.5
+
error_msg = (
f"{data.__class__.__name__}Weighted"
" does not contain the dimensions: {'bad_dim'}"
)
with pytest.raises(ValueError, match=error_msg):
- data.weighted(weights).mean("bad_dim")
+ getattr(data.weighted(weights), operation)(**kwargs)
| diff --git a/doc/api.rst b/doc/api.rst
index d2c222da4db..7fdd775e168 100644
--- a/doc/api.rst
+++ b/doc/api.rst
@@ -944,6 +944,7 @@ Dataset
DatasetWeighted
DatasetWeighted.mean
+ DatasetWeighted.quantile
DatasetWeighted.sum
DatasetWeighted.std
DatasetWeighted.var
@@ -958,6 +959,7 @@ DataArray
DataArrayWeighted
DataArrayWeighted.mean
+ DataArrayWeighted.quantile
DataArrayWeighted.sum
DataArrayWeighted.std
DataArrayWeighted.var
diff --git a/doc/user-guide/computation.rst b/doc/user-guide/computation.rst
index de2afa9060c..dc9748af80b 100644
--- a/doc/user-guide/computation.rst
+++ b/doc/user-guide/computation.rst
@@ -265,7 +265,7 @@ Weighted array reductions
:py:class:`DataArray` and :py:class:`Dataset` objects include :py:meth:`DataArray.weighted`
and :py:meth:`Dataset.weighted` array reduction methods. They currently
-support weighted ``sum``, ``mean``, ``std`` and ``var``.
+support weighted ``sum``, ``mean``, ``std``, ``var`` and ``quantile``.
.. ipython:: python
@@ -293,6 +293,12 @@ Calculate the weighted mean:
weighted_prec.mean(dim="month")
+Calculate the weighted quantile:
+
+.. ipython:: python
+
+ weighted_prec.quantile(q=0.5, dim="month")
+
The weighted sum corresponds to:
.. ipython:: python
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 37cf3af85b9..a15618e9d1f 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -22,6 +22,9 @@ v2022.03.1 (unreleased)
New Features
~~~~~~~~~~~~
+- Add a weighted ``quantile`` method to :py:class:`~core.weighted.DatasetWeighted` and
+ :py:class:`~core.weighted.DataArrayWeighted` (:pull:`6059`). By
+ `Christian Jauvin <https://github.com/cjauvin>`_ and `David Huard <https://github.com/huard>`_.
- Add a ``create_index=True`` parameter to :py:meth:`Dataset.stack` and
:py:meth:`DataArray.stack` so that the creation of multi-indexes is optional
(:pull:`5692`). By `Benoît Bovy <https://github.com/benbovy>`_.
| [
{
"components": [
{
"doc": "Apply a weighted ``quantile`` to a DataArray along some dimension(s).",
"lines": [
311,
444
],
"name": "Weighted._weighted_quantile",
"signature": "def _weighted_quantile( self, da: DataArray, q: ArrayLike, dim: Hashab... | [
"xarray/tests/test_weighted.py::test_weighted_quantile_no_nan[weights0-expected0]",
"xarray/tests/test_weighted.py::test_weighted_quantile_no_nan[weights1-expected1]",
"xarray/tests/test_weighted.py::test_weighted_quantile_zero_weights",
"xarray/tests/test_weighted.py::test_weighted_quantile_simple",
"xarra... | [
"xarray/tests/test_weighted.py::test_weighted_non_DataArray_weights[True]",
"xarray/tests/test_weighted.py::test_weighted_non_DataArray_weights[False]",
"xarray/tests/test_weighted.py::test_weighted_weights_nan_raises[weights0-True]",
"xarray/tests/test_weighted.py::test_weighted_weights_nan_raises[weights0-F... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Weighted quantile
- [x] Tests added
- [x] Passes `pre-commit run --all-files`
- [x] User visible changes (including notable bug fixes) are documented in `whats-new.rst`
- [x] New functions/methods are listed in `api.rst`
This is a follow-up to https://github.com/pydata/xarray/pull/5870/, which adds a weighted `quantile` function.
The question of how to precisely define the weighted quantile function is surprisingly complex, and this implementation offers a compromise in terms of simplicity and compatibility:
* The only interpolation method supported is the so-called "Type 7", as explained in https://aakinshin.net/posts/weighted-quantiles/, which proposes an R implementation, that I have adapted
* It turns out that Type 7 is apparently the most "popular" one, at least in the Python world: it corresponds to the default `linear` interpolation option of `numpy.quantile` (https://numpy.org/doc/stable/reference/generated/numpy.quantile.html) which is also the basis of xarray's already existing non-weighted quantile function
* I have taken care in making sure that the results of this new function, with equal weights, are equivalent to the ones of the already existing, non-weighted function (when used with its default interporlation option)
The interpolation question is so complex and confusing that entire articles have been written about it, as mentioned in the blog post above, in particular this one, which establishes the "nine types" taxoxomy, used, implicitly or not, by many software packages: https://doi.org/10.2307/2684934.
The situation seems even more complex in the NumPy world, where many discussions and suggestions are aimed toward trying to improve the consistency of the API. The current non-weighted situation has the 9 options, as well as 4 extra legacy ones: https://github.com/numpy/numpy/blob/376ad691fe4df77e502108d279872f56b30376dc/numpy/lib/function_base.py#L4177-L4203
This PR cuts the Gordian knot by offering only one interpolation option, but.. given that its implementation is based on `apply_ufunc` (in a very similar way to xarray's already existing non-weighted `quantile` function, which is also using `apply_ufunc` with `np.quantile`), in the event that `np.quantile` ever gains a `weights` keyword argument, it would be very easy to swap it. That way, xarray's weighted `quantile` could lose a little bit of code, and gain a plethora of interpolation options.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in xarray/core/weighted.py]
(definition of Weighted._weighted_quantile:)
def _weighted_quantile( self, da: DataArray, q: ArrayLike, dim: Hashable | Iterable[Hashable] | None = None, skipna: bool = None, ) -> DataArray:
"""Apply a weighted ``quantile`` to a DataArray along some dimension(s)."""
(definition of Weighted._weighted_quantile._get_h:)
def _get_h(n: float, q: np.ndarray, method: QUANTILE_METHODS) -> np.ndarray:
"""Return the interpolation parameter."""
(definition of Weighted._weighted_quantile._weighted_quantile_1d:)
def _weighted_quantile_1d( data: np.ndarray, weights: np.ndarray, q: np.ndarray, skipna: bool, method: QUANTILE_METHODS = "linear", ) -> np.ndarray:
(definition of Weighted.quantile:)
def quantile( self, q: ArrayLike, *, dim: Hashable | Sequence[Hashable] | None = None, keep_attrs: bool = None, skipna: bool = True, ) -> T_Xarray:
[end of new definitions in xarray/core/weighted.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 48290fa14accd3ac87768d3f73d69493b82b0be6 | |
Project-MONAI__MONAI-3456 | 3,456 | Project-MONAI/MONAI | null | 4b5ad0b3dc926c17d9478ae53f42da7949a2c3e0 | 2021-12-08T11:12:14Z | diff --git a/docs/source/data.rst b/docs/source/data.rst
index 0ab64edb7b..e8c68de853 100644
--- a/docs/source/data.rst
+++ b/docs/source/data.rst
@@ -21,6 +21,12 @@ Generic Interfaces
:members:
:special-members: __next__
+`DatasetFunc`
+~~~~~~~~~~~~~
+.. autoclass:: DatasetFunc
+ :members:
+ :special-members: __next__
+
`ShuffleBuffer`
~~~~~~~~~~~~~~~
.. autoclass:: ShuffleBuffer
diff --git a/monai/data/__init__.py b/monai/data/__init__.py
index e7fa2b3107..b12a307663 100644
--- a/monai/data/__init__.py
+++ b/monai/data/__init__.py
@@ -17,6 +17,7 @@
CacheNTransDataset,
CSVDataset,
Dataset,
+ DatasetFunc,
LMDBDataset,
NPZDictItemDataset,
PersistentDataset,
diff --git a/monai/data/dataset.py b/monai/data/dataset.py
index 38371f384b..ccd831ee0f 100644
--- a/monai/data/dataset.py
+++ b/monai/data/dataset.py
@@ -97,6 +97,56 @@ def __getitem__(self, index: Union[int, slice, Sequence[int]]):
return self._transform(index)
+class DatasetFunc(Dataset):
+ """
+ Execute function on the input dataset and leverage the output to act as a new Dataset.
+ It can be used to load / fetch the basic dataset items, like the list of `image, label` paths.
+ Or chain together to execute more complicated logic, like `partition_dataset`, `resample_datalist`, etc.
+ The `data` arg of `Dataset` will be applied to the first arg of callable `func`.
+ Usage example::
+
+ data_list = DatasetFunc(
+ data="path to file",
+ func=monai.data.load_decathlon_datalist,
+ data_list_key="validation",
+ base_dir="path to base dir",
+ )
+ # partition dataset for every rank
+ data_partition = DatasetFunc(
+ data=data_list,
+ func=lambda **kwargs: monai.data.partition_dataset(**kwargs)[torch.distributed.get_rank()],
+ num_partitions=torch.distributed.get_world_size(),
+ )
+ dataset = Dataset(data=data_partition, transform=transforms)
+
+ Args:
+ data: input data for the func to process, will apply to `func` as the first arg.
+ func: callable function to generate dataset items.
+ kwargs: other arguments for the `func` except for the first arg.
+
+ """
+
+ def __init__(self, data: Any, func: Callable, **kwargs) -> None:
+ super().__init__(data=None, transform=None) # type:ignore
+ self.src = data
+ self.func = func
+ self.kwargs = kwargs
+ self.reset()
+
+ def reset(self, data: Optional[Any] = None, func: Optional[Callable] = None, **kwargs):
+ """
+ Reset the dataset items with specified `func`.
+
+ Args:
+ data: if not None, execute `func` on it, default to `self.src`.
+ func: if not None, execute the `func` with specified `kwargs`, default to `self.func`.
+ kwargs: other arguments for the `func` except for the first arg.
+
+ """
+ src = self.src if data is None else data
+ self.data = self.func(src, **self.kwargs) if func is None else func(src, **kwargs)
+
+
class PersistentDataset(Dataset):
"""
Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,
| diff --git a/tests/test_dataset_func.py b/tests/test_dataset_func.py
new file mode 100644
index 0000000000..b3f6b95403
--- /dev/null
+++ b/tests/test_dataset_func.py
@@ -0,0 +1,52 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import tempfile
+import unittest
+
+from monai.data import Dataset, DatasetFunc, load_decathlon_datalist, partition_dataset
+
+
+class TestDatasetFunc(unittest.TestCase):
+ def test_seg_values(self):
+ with tempfile.TemporaryDirectory() as tempdir:
+ # prepare test datalist file
+ test_data = {
+ "name": "Spleen",
+ "description": "Spleen Segmentation",
+ "labels": {"0": "background", "1": "spleen"},
+ "training": [
+ {"image": "spleen_19.nii.gz", "label": "spleen_19.nii.gz"},
+ {"image": "spleen_31.nii.gz", "label": "spleen_31.nii.gz"},
+ ],
+ "test": ["spleen_15.nii.gz", "spleen_23.nii.gz"],
+ }
+ json_str = json.dumps(test_data)
+ file_path = os.path.join(tempdir, "test_data.json")
+ with open(file_path, "w") as json_file:
+ json_file.write(json_str)
+
+ data_list = DatasetFunc(
+ data=file_path, func=load_decathlon_datalist, data_list_key="training", base_dir=tempdir
+ )
+ # partition dataset for train / validation
+ data_partition = DatasetFunc(
+ data=data_list, func=lambda x, **kwargs: partition_dataset(x, **kwargs)[0], num_partitions=2
+ )
+ dataset = Dataset(data=data_partition, transform=None)
+ self.assertEqual(dataset[0]["image"], os.path.join(tempdir, "spleen_19.nii.gz"))
+ self.assertEqual(dataset[0]["label"], os.path.join(tempdir, "spleen_19.nii.gz"))
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/test_scale_intensity_range_percentilesd.py b/tests/test_scale_intensity_range_percentilesd.py
index ac2118d99f..0fcda21feb 100644
--- a/tests/test_scale_intensity_range_percentilesd.py
+++ b/tests/test_scale_intensity_range_percentilesd.py
@@ -35,7 +35,7 @@ def test_scaling(self):
scaler = ScaleIntensityRangePercentilesd(
keys=data.keys(), lower=lower, upper=upper, b_min=b_min, b_max=b_max
)
- assert_allclose(p(expected), scaler(data)["img"])
+ assert_allclose(p(expected), scaler(data)["img"], rtol=1e-4)
def test_relative_scaling(self):
img = self.imt
| diff --git a/docs/source/data.rst b/docs/source/data.rst
index 0ab64edb7b..e8c68de853 100644
--- a/docs/source/data.rst
+++ b/docs/source/data.rst
@@ -21,6 +21,12 @@ Generic Interfaces
:members:
:special-members: __next__
+`DatasetFunc`
+~~~~~~~~~~~~~
+.. autoclass:: DatasetFunc
+ :members:
+ :special-members: __next__
+
`ShuffleBuffer`
~~~~~~~~~~~~~~~
.. autoclass:: ShuffleBuffer
| [
{
"components": [
{
"doc": "Execute function on the input dataset and leverage the output to act as a new Dataset.\nIt can be used to load / fetch the basic dataset items, like the list of `image, label` paths.\nOr chain together to execute more complicated logic, like `partition_dataset`, `resamp... | [
"tests/test_dataset_func.py::TestDatasetFunc::test_seg_values",
"tests/test_scale_intensity_range_percentilesd.py::TestScaleIntensityRangePercentilesd::test_invalid_instantiation",
"tests/test_scale_intensity_range_percentilesd.py::TestScaleIntensityRangePercentilesd::test_relative_scaling",
"tests/test_scale... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
3444 Add DatasetFunc
Fixes #3444 .
### Description
Thanks for @wyli 's sharing, this PR added the `DatasetFunc` referring to:
https://github.com/webdataset/webdataset/blob/master/notebooks/howitworks.ipynb
### Status
**Ready**
### Types of changes
<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->
- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing functionality to change).
- [ ] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/` folder.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in monai/data/dataset.py]
(definition of DatasetFunc:)
class DatasetFunc(Dataset):
"""Execute function on the input dataset and leverage the output to act as a new Dataset.
It can be used to load / fetch the basic dataset items, like the list of `image, label` paths.
Or chain together to execute more complicated logic, like `partition_dataset`, `resample_datalist`, etc.
The `data` arg of `Dataset` will be applied to the first arg of callable `func`.
Usage example::
data_list = DatasetFunc(
data="path to file",
func=monai.data.load_decathlon_datalist,
data_list_key="validation",
base_dir="path to base dir",
)
# partition dataset for every rank
data_partition = DatasetFunc(
data=data_list,
func=lambda **kwargs: monai.data.partition_dataset(**kwargs)[torch.distributed.get_rank()],
num_partitions=torch.distributed.get_world_size(),
)
dataset = Dataset(data=data_partition, transform=transforms)
Args:
data: input data for the func to process, will apply to `func` as the first arg.
func: callable function to generate dataset items.
kwargs: other arguments for the `func` except for the first arg."""
(definition of DatasetFunc.__init__:)
def __init__(self, data: Any, func: Callable, **kwargs) -> None:
(definition of DatasetFunc.reset:)
def reset(self, data: Optional[Any] = None, func: Optional[Callable] = None, **kwargs):
"""Reset the dataset items with specified `func`.
Args:
data: if not None, execute `func` on it, default to `self.src`.
func: if not None, execute the `func` with specified `kwargs`, default to `self.func`.
kwargs: other arguments for the `func` except for the first arg."""
[end of new definitions in monai/data/dataset.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add DecathlonDatalist to enhance decathlon style loading
**Is your feature request related to a problem? Please describe.**
Currently, we already have several useful utilities to load or sample the decathlon datalist, would be nice to add a class to combine and enhance the feature.
We already have a reference implementation in NVIDIA Clara and can config it in the JSON file:
```
{"datalist": {"name": "DecathlonDatalist", "args": {XXX}}},
{"dataset": {"name": "CacheDataset", "args": {"data": "@datalist"}}}
```
I will try to reimplement it according to MONAI style.
----------
--------------------
</issues> | e73257caa79309dcce1e93abf1632f4bfd75b11f |
huggingface__datasets-3387 | 3,387 | huggingface/datasets | null | d3c7b9481d427ce41256edaf6773c47570f06f3b | 2021-12-06T07:56:07Z | diff --git a/src/datasets/tasks/__init__.py b/src/datasets/tasks/__init__.py
index f3053c2cfdf..7ff448b72e4 100644
--- a/src/datasets/tasks/__init__.py
+++ b/src/datasets/tasks/__init__.py
@@ -4,29 +4,32 @@
from .automatic_speech_recognition import AutomaticSpeechRecognition
from .base import TaskTemplate
from .image_classification import ImageClassification
+from .language_modeling import LanguageModeling
from .question_answering import QuestionAnsweringExtractive
from .summarization import Summarization
from .text_classification import TextClassification
__all__ = [
- "TaskTemplate",
- "QuestionAnsweringExtractive",
- "TextClassification",
- "Summarization",
"AutomaticSpeechRecognition",
"ImageClassification",
+ "LanguageModeling",
+ "QuestionAnsweringExtractive",
+ "Summarization",
+ "TaskTemplate",
+ "TextClassification",
]
logger = get_logger(__name__)
NAME2TEMPLATE = {
- QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
- TextClassification.task: TextClassification,
AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
- Summarization.task: Summarization,
ImageClassification.task: ImageClassification,
+ LanguageModeling.task: LanguageModeling,
+ QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
+ Summarization.task: Summarization,
+ TextClassification.task: TextClassification,
}
diff --git a/src/datasets/tasks/language_modeling.py b/src/datasets/tasks/language_modeling.py
new file mode 100644
index 00000000000..4f0dfedd149
--- /dev/null
+++ b/src/datasets/tasks/language_modeling.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class LanguageModeling(TaskTemplate):
+ task: str = "language-modeling"
+
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({})
+ text_column: str = "text"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text"}
| diff --git a/tests/test_tasks.py b/tests/test_tasks.py
index 118b2872d65..7fbf5ce1007 100644
--- a/tests/test_tasks.py
+++ b/tests/test_tasks.py
@@ -7,6 +7,7 @@
from datasets.tasks import (
AutomaticSpeechRecognition,
ImageClassification,
+ LanguageModeling,
QuestionAnsweringExtractive,
Summarization,
TextClassification,
@@ -22,6 +23,19 @@
}
+class TestLanguageModeling:
+ def test_column_mapping(self):
+ task = LanguageModeling(text_column="input_text")
+ assert {"input_text": "text"} == task.column_mapping
+
+ def test_from_dict(self):
+ input_schema = Features({"text": Value("string")})
+ template_dict = {"text_column": "input_text"}
+ task = LanguageModeling.from_dict(template_dict)
+ assert "language-modeling" == task.task
+ assert input_schema == task.input_schema
+
+
class TextClassificationTest(TestCase):
def setUp(self):
self.labels = sorted(["pos", "neg"])
| [
{
"components": [
{
"doc": "",
"lines": [
9,
18
],
"name": "LanguageModeling",
"signature": "class LanguageModeling(TaskTemplate):",
"type": "class"
},
{
"doc": "",
"lines": [
17,
18
... | [
"tests/test_tasks.py::TestLanguageModeling::test_column_mapping",
"tests/test_tasks.py::TestLanguageModeling::test_from_dict",
"tests/test_tasks.py::TextClassificationTest::test_column_mapping",
"tests/test_tasks.py::TextClassificationTest::test_from_dict",
"tests/test_tasks.py::TextClassificationTest::test... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Create Language Modeling task
Create Language Modeling task to be able to specify the input "text" column in a dataset.
This can be useful for datasets which are not exclusively used for language modeling and have more than one column:
- for text classification datasets (with columns "review" and "rating", for example), the Language Modeling task can be used to specify the "text" column ("review" in this case).
TODO:
- [ ] Add the LanguageModeling task to all dataset scripts which can be used for language modeling
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/datasets/tasks/language_modeling.py]
(definition of LanguageModeling:)
class LanguageModeling(TaskTemplate):
(definition of LanguageModeling.column_mapping:)
def column_mapping(self) -> Dict[str, str]:
[end of new definitions in src/datasets/tasks/language_modeling.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 5142a8cf61d8a4495eda3d91dc4283a6df01ea14 | ||
rytilahti__python-miio-1191 | 1,191 | rytilahti/python-miio | null | 9ba55a9e33ced679449a98567929598d1461987a | 2021-11-28T23:52:53Z | diff --git a/miio/__init__.py b/miio/__init__.py
index 5d6bf97c4..a78538e75 100644
--- a/miio/__init__.py
+++ b/miio/__init__.py
@@ -43,7 +43,7 @@
from miio.integrations.petwaterdispenser import PetWaterDispenser
from miio.integrations.vacuum.dreame.dreamevacuum_miot import DreameVacuumMiot
from miio.integrations.vacuum.mijia import G1Vacuum
-from miio.integrations.vacuum.roborock import Vacuum, VacuumException
+from miio.integrations.vacuum.roborock import RoborockVacuum, Vacuum, VacuumException
from miio.integrations.vacuum.roborock.vacuumcontainers import (
CleaningDetails,
CleaningSummary,
diff --git a/miio/integrations/vacuum/roborock/__init__.py b/miio/integrations/vacuum/roborock/__init__.py
index 586bdd008..26d58d8b7 100644
--- a/miio/integrations/vacuum/roborock/__init__.py
+++ b/miio/integrations/vacuum/roborock/__init__.py
@@ -1,2 +1,2 @@
# flake8: noqa
-from .vacuum import Vacuum, VacuumException, VacuumStatus
+from .vacuum import RoborockVacuum, Vacuum, VacuumException, VacuumStatus
diff --git a/miio/integrations/vacuum/roborock/vacuum.py b/miio/integrations/vacuum/roborock/vacuum.py
index 519f95653..f35e3bdca 100644
--- a/miio/integrations/vacuum/roborock/vacuum.py
+++ b/miio/integrations/vacuum/roborock/vacuum.py
@@ -22,6 +22,7 @@
)
from miio.device import Device, DeviceInfo
from miio.exceptions import DeviceException, DeviceInfoUnavailableException
+from miio.utils import deprecated
from .vacuumcontainers import (
CarpetModeStatus,
@@ -140,8 +141,8 @@ class CarpetCleaningMode(enum.Enum):
]
-class Vacuum(Device):
- """Main class representing the vacuum."""
+class RoborockVacuum(Device):
+ """Main class for roborock vacuums (roborock.vacuum.*)."""
_supported_models = SUPPORTED_MODELS
@@ -886,3 +887,13 @@ def cleanup(vac: Vacuum, *args, **kwargs):
json.dump(seqs, f)
return dg
+
+
+class Vacuum(RoborockVacuum):
+ """Main class for roborock vacuums."""
+
+ @deprecated(
+ "This class will become the base class for all vacuum implementations. Use RoborockVacuum to control roborock vacuums."
+ )
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
diff --git a/miio/integrations/vacuum/roborock/vacuum_cli.py b/miio/integrations/vacuum/roborock/vacuum_cli.py
index 9cf324439..a7659c7ed 100644
--- a/miio/integrations/vacuum/roborock/vacuum_cli.py
+++ b/miio/integrations/vacuum/roborock/vacuum_cli.py
@@ -13,23 +13,24 @@
from appdirs import user_cache_dir
from tqdm import tqdm
-import miio # noqa: E402
from miio.click_common import (
ExceptionHandlerGroup,
LiteralParamType,
validate_ip,
validate_token,
)
-from miio.device import UpdateState
+from miio.device import Device, UpdateState
from miio.exceptions import DeviceInfoUnavailableException
from miio.miioprotocol import MiIOProtocol
from miio.updater import OneShotServer
-from .vacuum import CarpetCleaningMode
+from .vacuum import CarpetCleaningMode, Consumable, RoborockVacuum, TimerState
from .vacuum_tui import VacuumTUI
+from miio.discovery import Discovery
+
_LOGGER = logging.getLogger(__name__)
-pass_dev = click.make_pass_decorator(miio.Device, ensure=True)
+pass_dev = click.make_pass_decorator(Device, ensure=True)
@click.group(invoke_without_command=True, cls=ExceptionHandlerGroup)
@@ -60,7 +61,6 @@ def cli(ctx, ip: str, token: str, debug: int, id_file: str):
click.echo("You have to give ip and token!")
sys.exit(-1)
- start_id = manual_seq = 0
with contextlib.suppress(FileNotFoundError, TypeError, ValueError), open(
id_file, "r"
) as f:
@@ -69,7 +69,7 @@ def cli(ctx, ip: str, token: str, debug: int, id_file: str):
manual_seq = x.get("manual_seq", 0)
_LOGGER.debug("Read stored sequence ids: %s", x)
- vac = miio.Vacuum(ip, token, start_id, debug)
+ vac = RoborockVacuum(ip, token, start_id, debug)
vac.manual_seqnum = manual_seq
_LOGGER.debug("Connecting to %s with token %s", ip, token)
@@ -83,7 +83,7 @@ def cli(ctx, ip: str, token: str, debug: int, id_file: str):
@cli.resultcallback()
@pass_dev
-def cleanup(vac: miio.Vacuum, *args, **kwargs):
+def cleanup(vac: RoborockVacuum, *args, **kwargs):
if vac.ip is None: # dummy Device for discovery, skip teardown
return
id_file = kwargs["id_file"]
@@ -105,12 +105,12 @@ def discover(handshake):
if handshake:
MiIOProtocol.discover()
else:
- miio.Discovery.discover_mdns()
+ Discovery.discover_mdns()
@cli.command()
@pass_dev
-def status(vac: miio.Vacuum):
+def status(vac: RoborockVacuum):
"""Returns the state information."""
res = vac.status()
if not res:
@@ -125,9 +125,6 @@ def status(vac: miio.Vacuum):
click.echo("Fanspeed: %s %%" % res.fanspeed)
click.echo("Cleaning since: %s" % res.clean_time)
click.echo("Cleaned area: %s m²" % res.clean_area)
- # click.echo("DND enabled: %s" % res.dnd)
- # click.echo("Map present: %s" % res.map)
- # click.echo("in_cleaning: %s" % res.in_cleaning)
click.echo("Water box attached: %s" % res.is_water_box_attached)
if res.is_water_box_carriage_attached is not None:
click.echo("Mop attached: %s" % res.is_water_box_carriage_attached)
@@ -135,7 +132,7 @@ def status(vac: miio.Vacuum):
@cli.command()
@pass_dev
-def consumables(vac: miio.Vacuum):
+def consumables(vac: RoborockVacuum):
"""Return consumables status."""
res = vac.consumable_status()
click.echo("Main brush: %s (left %s)" % (res.main_brush, res.main_brush_left))
@@ -147,13 +144,11 @@ def consumables(vac: miio.Vacuum):
@cli.command()
@click.argument("name", type=str, required=True)
@pass_dev
-def reset_consumable(vac: miio.Vacuum, name):
+def reset_consumable(vac: RoborockVacuum, name):
"""Reset consumable state.
Allowed values: main_brush, side_brush, filter, sensor_dirty
"""
- from miio.vacuum import Consumable
-
if name == "main_brush":
consumable = Consumable.MainBrush
elif name == "side_brush":
@@ -173,35 +168,35 @@ def reset_consumable(vac: miio.Vacuum, name):
@cli.command()
@pass_dev
-def start(vac: miio.Vacuum):
+def start(vac: RoborockVacuum):
"""Start cleaning."""
click.echo("Starting cleaning: %s" % vac.start())
@cli.command()
@pass_dev
-def spot(vac: miio.Vacuum):
+def spot(vac: RoborockVacuum):
"""Start spot cleaning."""
click.echo("Starting spot cleaning: %s" % vac.spot())
@cli.command()
@pass_dev
-def pause(vac: miio.Vacuum):
+def pause(vac: RoborockVacuum):
"""Pause cleaning."""
click.echo("Pausing: %s" % vac.pause())
@cli.command()
@pass_dev
-def stop(vac: miio.Vacuum):
+def stop(vac: RoborockVacuum):
"""Stop cleaning."""
click.echo("Stop cleaning: %s" % vac.stop())
@cli.command()
@pass_dev
-def home(vac: miio.Vacuum):
+def home(vac: RoborockVacuum):
"""Return home."""
click.echo("Requesting return to home: %s" % vac.home())
@@ -210,7 +205,7 @@ def home(vac: miio.Vacuum):
@pass_dev
@click.argument("x_coord", type=int)
@click.argument("y_coord", type=int)
-def goto(vac: miio.Vacuum, x_coord: int, y_coord: int):
+def goto(vac: RoborockVacuum, x_coord: int, y_coord: int):
"""Go to specific target."""
click.echo("Going to target : %s" % vac.goto(x_coord, y_coord))
@@ -218,7 +213,7 @@ def goto(vac: miio.Vacuum, x_coord: int, y_coord: int):
@cli.command()
@pass_dev
@click.argument("zones", type=LiteralParamType(), required=True)
-def zoned_clean(vac: miio.Vacuum, zones: List):
+def zoned_clean(vac: RoborockVacuum, zones: List):
"""Clean zone."""
click.echo("Cleaning zone(s) : %s" % vac.zoned_clean(zones))
@@ -226,7 +221,7 @@ def zoned_clean(vac: miio.Vacuum, zones: List):
@cli.group()
@pass_dev
# @click.argument('command', required=False)
-def manual(vac: miio.Vacuum):
+def manual(vac: RoborockVacuum):
"""Control the robot manually."""
command = ""
if command == "start":
@@ -240,14 +235,14 @@ def manual(vac: miio.Vacuum):
@manual.command()
@pass_dev
-def tui(vac: miio.Vacuum):
+def tui(vac: RoborockVacuum):
"""TUI for the manual mode."""
VacuumTUI(vac).run()
@manual.command(name="start")
@pass_dev
-def manual_start(vac: miio.Vacuum): # noqa: F811 # redef of start
+def manual_start(vac: RoborockVacuum): # noqa: F811 # redef of start
"""Activate the manual mode."""
click.echo("Activating manual controls")
return vac.manual_start()
@@ -255,7 +250,7 @@ def manual_start(vac: miio.Vacuum): # noqa: F811 # redef of start
@manual.command(name="stop")
@pass_dev
-def manual_stop(vac: miio.Vacuum): # noqa: F811 # redef of stop
+def manual_stop(vac: RoborockVacuum): # noqa: F811 # redef of stop
"""Deactivate the manual mode."""
click.echo("Deactivating manual controls")
return vac.manual_stop()
@@ -264,7 +259,7 @@ def manual_stop(vac: miio.Vacuum): # noqa: F811 # redef of stop
@manual.command()
@pass_dev
@click.argument("degrees", type=int)
-def left(vac: miio.Vacuum, degrees: int):
+def left(vac: RoborockVacuum, degrees: int):
"""Turn to left."""
click.echo("Turning %s degrees left" % degrees)
return vac.manual_control(degrees, 0)
@@ -273,7 +268,7 @@ def left(vac: miio.Vacuum, degrees: int):
@manual.command()
@pass_dev
@click.argument("degrees", type=int)
-def right(vac: miio.Vacuum, degrees: int):
+def right(vac: RoborockVacuum, degrees: int):
"""Turn to right."""
click.echo("Turning right")
return vac.manual_control(-degrees, 0)
@@ -282,7 +277,7 @@ def right(vac: miio.Vacuum, degrees: int):
@manual.command()
@click.argument("amount", type=float)
@pass_dev
-def forward(vac: miio.Vacuum, amount: float):
+def forward(vac: RoborockVacuum, amount: float):
"""Run forwards."""
click.echo("Moving forwards")
return vac.manual_control(0, amount)
@@ -291,7 +286,7 @@ def forward(vac: miio.Vacuum, amount: float):
@manual.command()
@click.argument("amount", type=float)
@pass_dev
-def backward(vac: miio.Vacuum, amount: float):
+def backward(vac: RoborockVacuum, amount: float):
"""Run backwards."""
click.echo("Moving backwards")
return vac.manual_control(0, -amount)
@@ -302,7 +297,7 @@ def backward(vac: miio.Vacuum, amount: float):
@click.argument("rotation", type=float)
@click.argument("velocity", type=float)
@click.argument("duration", type=int)
-def move(vac: miio.Vacuum, rotation: int, velocity: float, duration: int):
+def move(vac: RoborockVacuum, rotation: int, velocity: float, duration: int):
"""Pass raw manual values."""
return vac.manual_control(rotation, velocity, duration)
@@ -315,7 +310,12 @@ def move(vac: miio.Vacuum, rotation: int, velocity: float, duration: int):
@click.argument("end_min", type=int, required=False)
@pass_dev
def dnd(
- vac: miio.Vacuum, cmd: str, start_hr: int, start_min: int, end_hr: int, end_min: int
+ vac: RoborockVacuum,
+ cmd: str,
+ start_hr: int,
+ start_min: int,
+ end_hr: int,
+ end_min: int,
):
"""Query and adjust do-not-disturb mode."""
if cmd == "off":
@@ -339,7 +339,7 @@ def dnd(
@cli.command()
@click.argument("speed", type=int, required=False)
@pass_dev
-def fanspeed(vac: miio.Vacuum, speed):
+def fanspeed(vac: RoborockVacuum, speed):
"""Query and adjust the fan speed."""
if speed:
click.echo("Setting fan speed to %s" % speed)
@@ -351,7 +351,7 @@ def fanspeed(vac: miio.Vacuum, speed):
@cli.group(invoke_without_command=True)
@pass_dev
@click.pass_context
-def timer(ctx, vac: miio.Vacuum):
+def timer(ctx, vac: RoborockVacuum):
"""List and modify existing timers."""
if ctx.invoked_subcommand is not None:
return
@@ -377,7 +377,7 @@ def timer(ctx, vac: miio.Vacuum):
@click.option("--command", default="", required=False)
@click.option("--params", default="", required=False)
@pass_dev
-def add(vac: miio.Vacuum, cron, command, params):
+def add(vac: RoborockVacuum, cron, command, params):
"""Add a timer."""
click.echo(vac.add_timer(cron, command, params))
@@ -385,7 +385,7 @@ def add(vac: miio.Vacuum, cron, command, params):
@timer.command()
@click.argument("timer_id", type=int, required=True)
@pass_dev
-def delete(vac: miio.Vacuum, timer_id):
+def delete(vac: RoborockVacuum, timer_id):
"""Delete a timer."""
click.echo(vac.delete_timer(timer_id))
@@ -395,10 +395,8 @@ def delete(vac: miio.Vacuum, timer_id):
@click.option("--enable", is_flag=True)
@click.option("--disable", is_flag=True)
@pass_dev
-def update(vac: miio.Vacuum, timer_id, enable, disable):
+def update(vac: RoborockVacuum, timer_id, enable, disable):
"""Enable/disable a timer."""
- from miio.vacuum import TimerState
-
if enable and not disable:
vac.update_timer(timer_id, TimerState.On)
elif disable and not enable:
@@ -409,7 +407,7 @@ def update(vac: miio.Vacuum, timer_id, enable, disable):
@cli.command()
@pass_dev
-def find(vac: miio.Vacuum):
+def find(vac: RoborockVacuum):
"""Find the robot."""
click.echo("Sending find the robot calls.")
click.echo(vac.find())
@@ -417,14 +415,14 @@ def find(vac: miio.Vacuum):
@cli.command()
@pass_dev
-def map(vac: miio.Vacuum):
+def map(vac: RoborockVacuum):
"""Return the map token."""
click.echo(vac.map())
@cli.command()
@pass_dev
-def info(vac: miio.Vacuum):
+def info(vac: RoborockVacuum):
"""Return device information."""
try:
res = vac.info()
@@ -440,7 +438,7 @@ def info(vac: miio.Vacuum):
@cli.command()
@pass_dev
-def cleaning_history(vac: miio.Vacuum):
+def cleaning_history(vac: RoborockVacuum):
"""Query the cleaning history."""
res = vac.clean_history()
click.echo("Total clean count: %s" % res.count)
@@ -468,7 +466,7 @@ def cleaning_history(vac: miio.Vacuum):
@click.argument("volume", type=int, required=False)
@click.option("--test", "test_mode", is_flag=True, help="play a test tune")
@pass_dev
-def sound(vac: miio.Vacuum, volume: int, test_mode: bool):
+def sound(vac: RoborockVacuum, volume: int, test_mode: bool):
"""Query and change sound settings."""
if volume is not None:
click.echo("Setting sound volume to %s" % volume)
@@ -486,7 +484,7 @@ def sound(vac: miio.Vacuum, volume: int, test_mode: bool):
@click.option("--sid", type=int, required=False, default=10000)
@click.option("--ip", required=False)
@pass_dev
-def install_sound(vac: miio.Vacuum, url: str, md5sum: str, sid: int, ip: str):
+def install_sound(vac: RoborockVacuum, url: str, md5sum: str, sid: int, ip: str):
"""Install a sound.
When passing a local file this will create a self-hosting server
@@ -536,7 +534,7 @@ def install_sound(vac: miio.Vacuum, url: str, md5sum: str, sid: int, ip: str):
@cli.command()
@pass_dev
-def serial_number(vac: miio.Vacuum):
+def serial_number(vac: RoborockVacuum):
"""Query serial number."""
click.echo("Serial#: %s" % vac.serial_number())
@@ -544,7 +542,7 @@ def serial_number(vac: miio.Vacuum):
@cli.command()
@click.argument("tz", required=False)
@pass_dev
-def timezone(vac: miio.Vacuum, tz=None):
+def timezone(vac: RoborockVacuum, tz=None):
"""Query or set the timezone."""
if tz is not None:
click.echo("Setting timezone to: %s" % tz)
@@ -556,7 +554,7 @@ def timezone(vac: miio.Vacuum, tz=None):
@cli.command()
@click.argument("enabled", required=False, type=bool)
@pass_dev
-def carpet_mode(vac: miio.Vacuum, enabled=None):
+def carpet_mode(vac: RoborockVacuum, enabled=None):
"""Query or set the carpet mode."""
if enabled is None:
click.echo(vac.carpet_mode())
@@ -567,7 +565,7 @@ def carpet_mode(vac: miio.Vacuum, enabled=None):
@cli.command()
@click.argument("mode", required=False, type=str)
@pass_dev
-def carpet_cleaning_mode(vac: miio.Vacuum, mode=None):
+def carpet_cleaning_mode(vac: RoborockVacuum, mode=None):
"""Query or set the carpet cleaning/avoidance mode.
Allowed values: Avoid, Rise, Ignore
@@ -588,7 +586,9 @@ def carpet_cleaning_mode(vac: miio.Vacuum, mode=None):
@click.argument("uid", type=int, required=False)
@click.option("--timezone", type=str, required=False, default=None)
@pass_dev
-def configure_wifi(vac: miio.Vacuum, ssid: str, password: str, uid: int, timezone: str):
+def configure_wifi(
+ vac: RoborockVacuum, ssid: str, password: str, uid: int, timezone: str
+):
"""Configure the wifi settings.
Note that some newer firmwares may expect you to define the timezone by using
@@ -600,7 +600,7 @@ def configure_wifi(vac: miio.Vacuum, ssid: str, password: str, uid: int, timezon
@cli.command()
@pass_dev
-def update_status(vac: miio.Vacuum):
+def update_status(vac: RoborockVacuum):
"""Return update state and progress."""
update_state = vac.update_state()
click.echo("Update state: %s" % update_state)
@@ -614,7 +614,7 @@ def update_status(vac: miio.Vacuum):
@click.argument("md5", required=False, default=None)
@click.option("--ip", required=False)
@pass_dev
-def update_firmware(vac: miio.Vacuum, url: str, md5: str, ip: str):
+def update_firmware(vac: RoborockVacuum, url: str, md5: str, ip: str):
"""Update device firmware.
If `url` starts with http* it is expected to be an URL.
@@ -671,7 +671,7 @@ def update_firmware(vac: miio.Vacuum, url: str, md5: str, ip: str):
@click.argument("cmd", required=True)
@click.argument("parameters", required=False)
@pass_dev
-def raw_command(vac: miio.Vacuum, cmd, parameters):
+def raw_command(vac: RoborockVacuum, cmd, parameters):
"""Run a raw command."""
params = [] # type: Any
if parameters:
diff --git a/miio/integrations/vacuum/roborock/vacuum_tui.py b/miio/integrations/vacuum/roborock/vacuum_tui.py
index 986dc9c72..6dd2ab25c 100644
--- a/miio/integrations/vacuum/roborock/vacuum_tui.py
+++ b/miio/integrations/vacuum/roborock/vacuum_tui.py
@@ -8,7 +8,7 @@
import enum
from typing import Tuple
-from .vacuum import Vacuum
+from .vacuum import RoborockVacuum as Vacuum
class Control(enum.Enum):
diff --git a/miio/vacuum.py b/miio/vacuum.py
new file mode 100644
index 000000000..fd993ee9f
--- /dev/null
+++ b/miio/vacuum.py
@@ -0,0 +1,10 @@
+"""This file is just for compat reasons and prints out a deprecated warning when
+executed."""
+import warnings
+
+from .integrations.vacuum.roborock.vacuum import * # noqa: F403,F401
+
+warnings.warn(
+ "miio.vacuum module has been renamed to miio.integrations.vacuum.roborock.vacuum",
+ DeprecationWarning,
+)
diff --git a/pyproject.toml b/pyproject.toml
index 9165b1b9e..10e238084 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,7 @@ packages = [
keywords = ["xiaomi", "miio", "miot", "smart home"]
[tool.poetry.scripts]
-mirobo = "miio.integrations.roborock.vacuum_cli:cli"
+mirobo = "miio.integrations.vacuum.roborock.vacuum_cli:cli"
miio-extract-tokens = "miio.extract_tokens:main"
miiocli = "miio.cli:create_cli"
| diff --git a/miio/integrations/vacuum/roborock/tests/test_vacuum.py b/miio/integrations/vacuum/roborock/tests/test_vacuum.py
index 3a25c47f5..d08d8a586 100644
--- a/miio/integrations/vacuum/roborock/tests/test_vacuum.py
+++ b/miio/integrations/vacuum/roborock/tests/test_vacuum.py
@@ -4,13 +4,13 @@
import pytest
-from miio import Vacuum, VacuumStatus
+from miio import RoborockVacuum, Vacuum, VacuumStatus
from miio.tests.dummies import DummyDevice
from ..vacuum import CarpetCleaningMode, MopMode
-class DummyVacuum(DummyDevice, Vacuum):
+class DummyVacuum(DummyDevice, RoborockVacuum):
STATE_CHARGING = 8
STATE_CLEANING = 5
STATE_ZONED_CLEAN = 9
@@ -311,3 +311,11 @@ def test_mop_mode(self):
with patch.object(self.device, "send", return_value=[32453]):
assert self.device.mop_mode() is None
+
+
+def test_deprecated_vacuum(caplog):
+ with pytest.deprecated_call():
+ Vacuum("127.1.1.1", "68ffffffffffffffffffffffffffffff")
+
+ with pytest.deprecated_call():
+ from miio.vacuum import ROCKROBO_S6 # noqa: F401
| diff --git a/pyproject.toml b/pyproject.toml
index 9165b1b9e..10e238084 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,7 @@ packages = [
keywords = ["xiaomi", "miio", "miot", "smart home"]
[tool.poetry.scripts]
-mirobo = "miio.integrations.roborock.vacuum_cli:cli"
+mirobo = "miio.integrations.vacuum.roborock.vacuum_cli:cli"
miio-extract-tokens = "miio.extract_tokens:main"
miiocli = "miio.cli:create_cli"
| [
{
"components": [
{
"doc": "Main class for roborock vacuums (roborock.vacuum.*).",
"lines": [
144,
889
],
"name": "RoborockVacuum",
"signature": "class RoborockVacuum(Device):",
"type": "class"
},
{
"doc": "",
... | [
"miio/integrations/vacuum/roborock/tests/test_vacuum.py::TestVacuum::test_carpet_cleaning_mode",
"miio/integrations/vacuum/roborock/tests/test_vacuum.py::TestVacuum::test_goto",
"miio/integrations/vacuum/roborock/tests/test_vacuum.py::TestVacuum::test_history",
"miio/integrations/vacuum/roborock/tests/test_va... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Deprecate roborock specific miio.Vacuum
In order to make `Vacuum` as a usable base class for all vacuum implementations in the future, let's inform potential downstream users to convert their code to use the roborockvacuum class:
* Initializing `miio.Vacuum()` instructs now to use the `RoborockVacuum`
* `miio.vacuum` exposes the roborockvacuum internals while giving a deprecation warning
This should not break any existing code but marking it as such anyway to give it more visibility in the changelog.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in miio/integrations/vacuum/roborock/vacuum.py]
(definition of RoborockVacuum:)
class RoborockVacuum(Device):
"""Main class for roborock vacuums (roborock.vacuum.*)."""
(definition of RoborockVacuum.__init__:)
def __init__( self, ip: str, token: str = None, start_id: int = 0, debug: int = 0, *, model=None ):
(definition of RoborockVacuum.start:)
def start(self):
"""Start cleaning."""
(definition of RoborockVacuum.stop:)
def stop(self):
"""Stop cleaning.
Note, prefer 'pause' instead of this for wider support. Some newer vacuum models
do not support this command."""
(definition of RoborockVacuum.spot:)
def spot(self):
"""Start spot cleaning."""
(definition of RoborockVacuum.pause:)
def pause(self):
"""Pause cleaning."""
(definition of RoborockVacuum.resume_or_start:)
def resume_or_start(self):
"""A shortcut for resuming or starting cleaning."""
(definition of RoborockVacuum._fetch_info:)
def _fetch_info(self) -> DeviceInfo:
"""Return info about the device.
This is overrides the base class info to account for gen1 devices that do not
respond to info query properly when not connected to the cloud."""
(definition of RoborockVacuum.home:)
def home(self):
"""Stop cleaning and return home."""
(definition of RoborockVacuum.goto:)
def goto(self, x_coord: int, y_coord: int):
"""Go to specific target.
:param int x_coord: x coordinate
:param int y_coord: y coordinate"""
(definition of RoborockVacuum.zoned_clean:)
def zoned_clean(self, zones: List):
"""Clean zones.
:param List zones: List of zones to clean: [[x1,y1,x2,y2, iterations],[x1,y1,x2,y2, iterations]]"""
(definition of RoborockVacuum.resume_zoned_clean:)
def resume_zoned_clean(self):
"""Resume zone cleaning after being paused."""
(definition of RoborockVacuum.manual_start:)
def manual_start(self):
"""Start manual control mode."""
(definition of RoborockVacuum.manual_stop:)
def manual_stop(self):
"""Stop manual control mode."""
(definition of RoborockVacuum.manual_control_once:)
def manual_control_once( self, rotation: int, velocity: float, duration: int = MANUAL_DURATION_DEFAULT ):
"""Starts the remote control mode and executes the action once before
deactivating the mode."""
(definition of RoborockVacuum.manual_control:)
def manual_control( self, rotation: int, velocity: float, duration: int = MANUAL_DURATION_DEFAULT ):
"""Give a command over manual control interface."""
(definition of RoborockVacuum.status:)
def status(self) -> VacuumStatus:
"""Return status of the vacuum."""
(definition of RoborockVacuum.enable_log_upload:)
def enable_log_upload(self):
(definition of RoborockVacuum.log_upload_status:)
def log_upload_status(self):
(definition of RoborockVacuum.consumable_status:)
def consumable_status(self) -> ConsumableStatus:
"""Return information about consumables."""
(definition of RoborockVacuum.consumable_reset:)
def consumable_reset(self, consumable: Consumable):
"""Reset consumable information."""
(definition of RoborockVacuum.map:)
def map(self):
"""Return map token."""
(definition of RoborockVacuum.edit_map:)
def edit_map(self, start):
"""Start map editing?"""
(definition of RoborockVacuum.fresh_map:)
def fresh_map(self, version):
"""Return fresh map?"""
(definition of RoborockVacuum.persist_map:)
def persist_map(self, version):
"""Return fresh map?"""
(definition of RoborockVacuum.create_software_barrier:)
def create_software_barrier(self, x1, y1, x2, y2):
"""Create software barrier (gen2 only?).
NOTE: Multiple nogo zones and barriers could be added by passing
a list of them to save_map.
Requires new fw version.
3.3.9_001633+?"""
(definition of RoborockVacuum.create_nogo_zone:)
def create_nogo_zone(self, x1, y1, x2, y2, x3, y3, x4, y4):
"""Create a rectangular no-go zone (gen2 only?).
NOTE: Multiple nogo zones and barriers could be added by passing
a list of them to save_map.
Requires new fw version.
3.3.9_001633+?"""
(definition of RoborockVacuum.enable_lab_mode:)
def enable_lab_mode(self, enable):
"""Enable persistent maps and software barriers.
This is required to use create_nogo_zone and create_software_barrier commands."""
(definition of RoborockVacuum.clean_history:)
def clean_history(self) -> CleaningSummary:
"""Return generic cleaning history."""
(definition of RoborockVacuum.last_clean_details:)
def last_clean_details(self) -> Optional[CleaningDetails]:
"""Return details from the last cleaning.
Returns None if there has been no cleanups."""
(definition of RoborockVacuum.clean_details:)
def clean_details( self, id_: int ) -> Union[List[CleaningDetails], Optional[CleaningDetails]]:
"""Return details about specific cleaning."""
(definition of RoborockVacuum.find:)
def find(self):
"""Find the robot."""
(definition of RoborockVacuum.timer:)
def timer(self) -> List[Timer]:
"""Return a list of timers."""
(definition of RoborockVacuum.add_timer:)
def add_timer(self, cron: str, command: str, parameters: str):
"""Add a timer.
:param cron: schedule in cron format
:param command: ignored by the vacuum.
:param parameters: ignored by the vacuum."""
(definition of RoborockVacuum.delete_timer:)
def delete_timer(self, timer_id: int):
"""Delete a timer with given ID.
:param int timer_id: Timer ID"""
(definition of RoborockVacuum.update_timer:)
def update_timer(self, timer_id: int, mode: TimerState):
"""Update a timer with given ID.
:param int timer_id: Timer ID
:param TimerStae mode: either On or Off"""
(definition of RoborockVacuum.dnd_status:)
def dnd_status(self):
"""Returns do-not-disturb status."""
(definition of RoborockVacuum.set_dnd:)
def set_dnd(self, start_hr: int, start_min: int, end_hr: int, end_min: int):
"""Set do-not-disturb.
:param int start_hr: Start hour
:param int start_min: Start minute
:param int end_hr: End hour
:param int end_min: End minute"""
(definition of RoborockVacuum.disable_dnd:)
def disable_dnd(self):
"""Disable do-not-disturb."""
(definition of RoborockVacuum.set_fan_speed:)
def set_fan_speed(self, speed: int):
"""Set fan speed.
:param int speed: Fan speed to set"""
(definition of RoborockVacuum.fan_speed:)
def fan_speed(self):
"""Return fan speed."""
(definition of RoborockVacuum.fan_speed_presets:)
def fan_speed_presets(self) -> Dict[str, int]:
"""Return dictionary containing supported fan speeds."""
(definition of RoborockVacuum.fan_speed_presets._enum_as_dict:)
def _enum_as_dict(cls):
(definition of RoborockVacuum.sound_info:)
def sound_info(self):
"""Get voice settings."""
(definition of RoborockVacuum.install_sound:)
def install_sound(self, url: str, md5sum: str, sound_id: int):
"""Install sound from the given url."""
(definition of RoborockVacuum.sound_install_progress:)
def sound_install_progress(self):
"""Get sound installation progress."""
(definition of RoborockVacuum.sound_volume:)
def sound_volume(self) -> int:
"""Get sound volume."""
(definition of RoborockVacuum.set_sound_volume:)
def set_sound_volume(self, vol: int):
"""Set sound volume [0-100]."""
(definition of RoborockVacuum.test_sound_volume:)
def test_sound_volume(self):
"""Test current sound volume."""
(definition of RoborockVacuum.serial_number:)
def serial_number(self):
"""Get serial number."""
(definition of RoborockVacuum.locale:)
def locale(self):
"""Return locale information."""
(definition of RoborockVacuum.timezone:)
def timezone(self):
"""Get the timezone."""
(definition of RoborockVacuum.timezone._fallback_timezone:)
def _fallback_timezone(data):
(definition of RoborockVacuum.set_timezone:)
def set_timezone(self, new_zone):
"""Set the timezone."""
(definition of RoborockVacuum.configure_wifi:)
def configure_wifi(self, ssid, password, uid=0, timezone=None):
"""Configure the wifi settings."""
(definition of RoborockVacuum.carpet_mode:)
def carpet_mode(self):
"""Get carpet mode settings."""
(definition of RoborockVacuum.set_carpet_mode:)
def set_carpet_mode( self, enabled: bool, stall_time: int = 10, low: int = 400, high: int = 500, integral: int = 450, ):
"""Set the carpet mode."""
(definition of RoborockVacuum.carpet_cleaning_mode:)
def carpet_cleaning_mode(self) -> Optional[CarpetCleaningMode]:
"""Get carpet cleaning mode/avoidance setting."""
(definition of RoborockVacuum.set_carpet_cleaning_mode:)
def set_carpet_cleaning_mode(self, mode: CarpetCleaningMode):
"""Set carpet cleaning mode/avoidance setting."""
(definition of RoborockVacuum.stop_zoned_clean:)
def stop_zoned_clean(self):
"""Stop cleaning a zone."""
(definition of RoborockVacuum.stop_segment_clean:)
def stop_segment_clean(self):
"""Stop cleaning a segment."""
(definition of RoborockVacuum.resume_segment_clean:)
def resume_segment_clean(self):
"""Resuming cleaning a segment."""
(definition of RoborockVacuum.segment_clean:)
def segment_clean(self, segments: List):
"""Clean segments.
:param List segments: List of segments to clean: [16,17,18]"""
(definition of RoborockVacuum.get_room_mapping:)
def get_room_mapping(self):
"""Retrieves a list of segments."""
(definition of RoborockVacuum.get_backup_maps:)
def get_backup_maps(self):
"""Get backup maps."""
(definition of RoborockVacuum.use_backup_map:)
def use_backup_map(self, id: int):
"""Set backup map."""
(definition of RoborockVacuum.get_segment_status:)
def get_segment_status(self):
"""Get the status of a segment."""
(definition of RoborockVacuum.name_segment:)
def name_segment(self):
(definition of RoborockVacuum.merge_segment:)
def merge_segment(self):
(definition of RoborockVacuum.split_segment:)
def split_segment(self):
(definition of RoborockVacuum.waterflow:)
def waterflow(self) -> WaterFlow:
"""Get water flow setting."""
(definition of RoborockVacuum.set_waterflow:)
def set_waterflow(self, waterflow: WaterFlow):
"""Set water flow setting."""
(definition of RoborockVacuum.mop_mode:)
def mop_mode(self) -> Optional[MopMode]:
"""Get mop mode setting."""
(definition of RoborockVacuum.set_mop_mode:)
def set_mop_mode(self, mop_mode: MopMode):
"""Set mop mode setting."""
(definition of RoborockVacuum.child_lock:)
def child_lock(self) -> bool:
"""Get child lock setting."""
(definition of RoborockVacuum.set_child_lock:)
def set_child_lock(self, lock: bool) -> bool:
"""Set child lock setting."""
(definition of RoborockVacuum.get_device_group:)
def get_device_group(cls): @click.pass_context
(definition of RoborockVacuum.get_device_group.callback:)
def callback(ctx, *args, id_file, **kwargs):
(definition of RoborockVacuum.get_device_group.cleanup:)
def cleanup(vac: Vacuum, *args, **kwargs):
[end of new definitions in miio/integrations/vacuum/roborock/vacuum.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 62427d2f796e603520acca3b57b29ec3e6489bca | |
scikit-learn__scikit-learn-21701 | 21,701 | scikit-learn/scikit-learn | 1.1 | a794c58692a1f3e7a85a42d8c7f7ddd5fcf18baa | 2021-11-17T22:42:33Z | diff --git a/doc/modules/random_projection.rst b/doc/modules/random_projection.rst
index adb2d53bd14d6..7d3341f0244bb 100644
--- a/doc/modules/random_projection.rst
+++ b/doc/modules/random_projection.rst
@@ -160,3 +160,42 @@ projection transformer::
In Proceedings of the 12th ACM SIGKDD international conference on
Knowledge discovery and data mining (KDD '06). ACM, New York, NY, USA,
287-296.
+
+
+.. _random_projection_inverse_transform:
+
+Inverse Transform
+=================
+The random projection transformers have ``compute_inverse_components`` parameter. When
+set to True, after creating the random ``components_`` matrix during fitting,
+the transformer computes the pseudo-inverse of this matrix and stores it as
+``inverse_components_``. The ``inverse_components_`` matrix has shape
+:math:`n_{features} \times n_{components}`, and it is always a dense matrix,
+regardless of whether the components matrix is sparse or dense. So depending on
+the number of features and components, it may use a lot of memory.
+
+When the ``inverse_transform`` method is called, it computes the product of the
+input ``X`` and the transpose of the inverse components. If the inverse components have
+been computed during fit, they are reused at each call to ``inverse_transform``.
+Otherwise they are recomputed each time, which can be costly. The result is always
+dense, even if ``X`` is sparse.
+
+Here a small code example which illustrates how to use the inverse transform
+feature::
+
+ >>> import numpy as np
+ >>> from sklearn.random_projection import SparseRandomProjection
+ >>> X = np.random.rand(100, 10000)
+ >>> transformer = SparseRandomProjection(
+ ... compute_inverse_components=True
+ ... )
+ ...
+ >>> X_new = transformer.fit_transform(X)
+ >>> X_new.shape
+ (100, 3947)
+ >>> X_new_inversed = transformer.inverse_transform(X_new)
+ >>> X_new_inversed.shape
+ (100, 10000)
+ >>> X_new_again = transformer.transform(X_new_inversed)
+ >>> np.allclose(X_new, X_new_again)
+ True
diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index 5030ed8faa4a1..e66640cfd2d21 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -791,6 +791,14 @@ Changelog
:class:`random_projection.GaussianRandomProjection` preserves dtype for
`numpy.float32`. :pr:`22114` by :user:`Takeshi Oura <takoika>`.
+- |Enhancement| Adds an :meth:`inverse_transform` method and a
+ `compute_inverse_transform` parameter to all transformers in the
+ :mod:`~sklearn.random_projection` module:
+ :class:`~sklearn.random_projection.GaussianRandomProjection` and
+ :class:`~sklearn.random_projection.SparseRandomProjection`. When the parameter is set
+ to True, the pseudo-inverse of the components is computed during `fit` and stored as
+ `inverse_components_`. :pr:`21701` by `Aurélien Geron <ageron>`.
+
- |API| Adds :term:`get_feature_names_out` to all transformers in the
:mod:`~sklearn.random_projection` module:
:class:`~sklearn.random_projection.GaussianRandomProjection` and
diff --git a/sklearn/random_projection.py b/sklearn/random_projection.py
index 31ebfdddd8928..000eca478553e 100644
--- a/sklearn/random_projection.py
+++ b/sklearn/random_projection.py
@@ -31,6 +31,7 @@
from abc import ABCMeta, abstractmethod
import numpy as np
+from scipy import linalg
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
@@ -39,10 +40,9 @@
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
-from .utils.validation import check_is_fitted
+from .utils.validation import check_array, check_is_fitted
from .exceptions import DataDimensionalityWarning
-
__all__ = [
"SparseRandomProjection",
"GaussianRandomProjection",
@@ -302,11 +302,18 @@ class BaseRandomProjection(
@abstractmethod
def __init__(
- self, n_components="auto", *, eps=0.1, dense_output=False, random_state=None
+ self,
+ n_components="auto",
+ *,
+ eps=0.1,
+ dense_output=False,
+ compute_inverse_components=False,
+ random_state=None,
):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
+ self.compute_inverse_components = compute_inverse_components
self.random_state = random_state
@abstractmethod
@@ -323,12 +330,18 @@ def _make_random_matrix(self, n_components, n_features):
Returns
-------
- components : {ndarray, sparse matrix} of shape \
- (n_components, n_features)
+ components : {ndarray, sparse matrix} of shape (n_components, n_features)
The generated random matrix. Sparse matrix will be of CSR format.
"""
+ def _compute_inverse_components(self):
+ """Compute the pseudo-inverse of the (densified) components."""
+ components = self.components_
+ if sp.issparse(components):
+ components = components.toarray()
+ return linalg.pinv(components, check_finite=False)
+
def fit(self, X, y=None):
"""Generate a sparse random projection matrix.
@@ -399,6 +412,9 @@ def fit(self, X, y=None):
" not the proper shape."
)
+ if self.compute_inverse_components:
+ self.inverse_components_ = self._compute_inverse_components()
+
return self
def transform(self, X):
@@ -437,6 +453,35 @@ def _n_features_out(self):
"""
return self.n_components
+ def inverse_transform(self, X):
+ """Project data back to its original space.
+
+ Returns an array X_original whose transform would be X. Note that even
+ if X is sparse, X_original is dense: this may use a lot of RAM.
+
+ If `compute_inverse_components` is False, the inverse of the components is
+ computed during each call to `inverse_transform` which can be costly.
+
+ Parameters
+ ----------
+ X : {array-like, sparse matrix} of shape (n_samples, n_components)
+ Data to be transformed back.
+
+ Returns
+ -------
+ X_original : ndarray of shape (n_samples, n_features)
+ Reconstructed data.
+ """
+ check_is_fitted(self)
+
+ X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=("csr", "csc"))
+
+ if self.compute_inverse_components:
+ return X @ self.inverse_components_.T
+
+ inverse_components = self._compute_inverse_components()
+ return X @ inverse_components.T
+
def _more_tags(self):
return {
"preserves_dtype": [np.float64, np.float32],
@@ -474,6 +519,11 @@ class GaussianRandomProjection(BaseRandomProjection):
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
+ compute_inverse_components : bool, default=False
+ Learn the inverse transform by computing the pseudo-inverse of the
+ components during fit. Note that computing the pseudo-inverse does not
+ scale well to large matrices.
+
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the
projection matrix at fit time.
@@ -488,6 +538,12 @@ class GaussianRandomProjection(BaseRandomProjection):
components_ : ndarray of shape (n_components, n_features)
Random matrix used for the projection.
+ inverse_components_ : ndarray of shape (n_features, n_components)
+ Pseudo-inverse of the components, only computed if
+ `compute_inverse_components` is True.
+
+ .. versionadded:: 1.1
+
n_features_in_ : int
Number of features seen during :term:`fit`.
@@ -516,11 +572,19 @@ class GaussianRandomProjection(BaseRandomProjection):
(25, 2759)
"""
- def __init__(self, n_components="auto", *, eps=0.1, random_state=None):
+ def __init__(
+ self,
+ n_components="auto",
+ *,
+ eps=0.1,
+ compute_inverse_components=False,
+ random_state=None,
+ ):
super().__init__(
n_components=n_components,
eps=eps,
dense_output=True,
+ compute_inverse_components=compute_inverse_components,
random_state=random_state,
)
@@ -610,6 +674,14 @@ class SparseRandomProjection(BaseRandomProjection):
If False, the projected data uses a sparse representation if
the input is sparse.
+ compute_inverse_components : bool, default=False
+ Learn the inverse transform by computing the pseudo-inverse of the
+ components during fit. Note that the pseudo-inverse is always a dense
+ array, even if the training data was sparse. This means that it might be
+ necessary to call `inverse_transform` on a small batch of samples at a
+ time to avoid exhausting the available memory on the host. Moreover,
+ computing the pseudo-inverse does not scale well to large matrices.
+
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generator used to generate the
projection matrix at fit time.
@@ -625,6 +697,12 @@ class SparseRandomProjection(BaseRandomProjection):
Random matrix used for the projection. Sparse matrix will be of CSR
format.
+ inverse_components_ : ndarray of shape (n_features, n_components)
+ Pseudo-inverse of the components, only computed if
+ `compute_inverse_components` is True.
+
+ .. versionadded:: 1.1
+
density_ : float in range 0.0 - 1.0
Concrete density computed from when density = "auto".
@@ -676,12 +754,14 @@ def __init__(
density="auto",
eps=0.1,
dense_output=False,
+ compute_inverse_components=False,
random_state=None,
):
super().__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
+ compute_inverse_components=compute_inverse_components,
random_state=random_state,
)
| diff --git a/sklearn/tests/test_random_projection.py b/sklearn/tests/test_random_projection.py
index a3a6b1ae2a49f..4d21090a3e6fb 100644
--- a/sklearn/tests/test_random_projection.py
+++ b/sklearn/tests/test_random_projection.py
@@ -1,5 +1,6 @@
import functools
from typing import List, Any
+import warnings
import numpy as np
import scipy.sparse as sp
@@ -31,8 +32,8 @@
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
-def make_sparse_random_data(n_samples, n_features, n_nonzeros):
- rng = np.random.RandomState(0)
+def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=0):
+ rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(
rng.randn(n_nonzeros),
@@ -377,6 +378,57 @@ def test_random_projection_feature_names_out(random_projection_cls):
assert_array_equal(names_out, expected_names_out)
+@pytest.mark.parametrize("n_samples", (2, 9, 10, 11, 1000))
+@pytest.mark.parametrize("n_features", (2, 9, 10, 11, 1000))
+@pytest.mark.parametrize("random_projection_cls", all_RandomProjection)
+@pytest.mark.parametrize("compute_inverse_components", [True, False])
+def test_inverse_transform(
+ n_samples,
+ n_features,
+ random_projection_cls,
+ compute_inverse_components,
+ global_random_seed,
+):
+ n_components = 10
+
+ random_projection = random_projection_cls(
+ n_components=n_components,
+ compute_inverse_components=compute_inverse_components,
+ random_state=global_random_seed,
+ )
+
+ X_dense, X_csr = make_sparse_random_data(
+ n_samples,
+ n_features,
+ n_samples * n_features // 100 + 1,
+ random_state=global_random_seed,
+ )
+
+ for X in [X_dense, X_csr]:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ message=(
+ "The number of components is higher than the number of features"
+ ),
+ category=DataDimensionalityWarning,
+ )
+ projected = random_projection.fit_transform(X)
+
+ if compute_inverse_components:
+ assert hasattr(random_projection, "inverse_components_")
+ inv_components = random_projection.inverse_components_
+ assert inv_components.shape == (n_features, n_components)
+
+ projected_back = random_projection.inverse_transform(projected)
+ assert projected_back.shape == X.shape
+
+ projected_again = random_projection.transform(projected_back)
+ if hasattr(projected, "toarray"):
+ projected = projected.toarray()
+ assert_allclose(projected, projected_again, rtol=1e-7, atol=1e-10)
+
+
@pytest.mark.parametrize("random_projection_cls", all_RandomProjection)
@pytest.mark.parametrize(
"input_dtype, expected_dtype",
| diff --git a/doc/modules/random_projection.rst b/doc/modules/random_projection.rst
index adb2d53bd14d6..7d3341f0244bb 100644
--- a/doc/modules/random_projection.rst
+++ b/doc/modules/random_projection.rst
@@ -160,3 +160,42 @@ projection transformer::
In Proceedings of the 12th ACM SIGKDD international conference on
Knowledge discovery and data mining (KDD '06). ACM, New York, NY, USA,
287-296.
+
+
+.. _random_projection_inverse_transform:
+
+Inverse Transform
+=================
+The random projection transformers have ``compute_inverse_components`` parameter. When
+set to True, after creating the random ``components_`` matrix during fitting,
+the transformer computes the pseudo-inverse of this matrix and stores it as
+``inverse_components_``. The ``inverse_components_`` matrix has shape
+:math:`n_{features} \times n_{components}`, and it is always a dense matrix,
+regardless of whether the components matrix is sparse or dense. So depending on
+the number of features and components, it may use a lot of memory.
+
+When the ``inverse_transform`` method is called, it computes the product of the
+input ``X`` and the transpose of the inverse components. If the inverse components have
+been computed during fit, they are reused at each call to ``inverse_transform``.
+Otherwise they are recomputed each time, which can be costly. The result is always
+dense, even if ``X`` is sparse.
+
+Here a small code example which illustrates how to use the inverse transform
+feature::
+
+ >>> import numpy as np
+ >>> from sklearn.random_projection import SparseRandomProjection
+ >>> X = np.random.rand(100, 10000)
+ >>> transformer = SparseRandomProjection(
+ ... compute_inverse_components=True
+ ... )
+ ...
+ >>> X_new = transformer.fit_transform(X)
+ >>> X_new.shape
+ (100, 3947)
+ >>> X_new_inversed = transformer.inverse_transform(X_new)
+ >>> X_new_inversed.shape
+ (100, 10000)
+ >>> X_new_again = transformer.transform(X_new_inversed)
+ >>> np.allclose(X_new, X_new_again)
+ True
diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index 5030ed8faa4a1..e66640cfd2d21 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -791,6 +791,14 @@ Changelog
:class:`random_projection.GaussianRandomProjection` preserves dtype for
`numpy.float32`. :pr:`22114` by :user:`Takeshi Oura <takoika>`.
+- |Enhancement| Adds an :meth:`inverse_transform` method and a
+ `compute_inverse_transform` parameter to all transformers in the
+ :mod:`~sklearn.random_projection` module:
+ :class:`~sklearn.random_projection.GaussianRandomProjection` and
+ :class:`~sklearn.random_projection.SparseRandomProjection`. When the parameter is set
+ to True, the pseudo-inverse of the components is computed during `fit` and stored as
+ `inverse_components_`. :pr:`21701` by `Aurélien Geron <ageron>`.
+
- |API| Adds :term:`get_feature_names_out` to all transformers in the
:mod:`~sklearn.random_projection` module:
:class:`~sklearn.random_projection.GaussianRandomProjection` and
| [
{
"components": [
{
"doc": "Compute the pseudo-inverse of the (densified) components.",
"lines": [
338,
343
],
"name": "BaseRandomProjection._compute_inverse_components",
"signature": "def _compute_inverse_components(self):",
"type": "fun... | [
"sklearn/tests/test_random_projection.py::test_inverse_transform[42-True-SparseRandomProjection-2-2]",
"sklearn/tests/test_random_projection.py::test_inverse_transform[42-True-SparseRandomProjection-2-9]",
"sklearn/tests/test_random_projection.py::test_inverse_transform[42-True-SparseRandomProjection-2-10]",
... | [
"sklearn/tests/test_random_projection.py::test_invalid_jl_domain[100-1.1]",
"sklearn/tests/test_random_projection.py::test_invalid_jl_domain[100-0.0]",
"sklearn/tests/test_random_projection.py::test_invalid_jl_domain[100--0.1]",
"sklearn/tests/test_random_projection.py::test_invalid_jl_domain[0-0.5]",
"skle... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH Add inverse_transform to random projection transformers
#### Reference Issues/PRs
Fixes #21687
#### What does this implement/fix? Explain your changes.
Adds a `fit_inverse_transform` parameter to all transformers in the `sklearn.random_projection` module: `GaussianRandomProjection` and `SparseRandomProjection`. When set to `True`, the pseudo-inverse of the components is computed during `fit()` and stored in `components_pinv_`, and `inverse_transform()` becomes available.
#### Any other comments?
Using the pseudo-inverse makes sense to me, and it seems to work fine, in the sense that `rnd_proj.transform(rnd_proj.inverse_transform(X))` equals `X`. However, this implementation uses `scipy.linalg.pinv()`, which scales very poorly to large datasets, which is a major use case for random projections. Perhaps it would make sense to use another approach if we detect that the `components_` array is large?
And perhaps there's a mathematical way to generate both a random matrix and its inverse more efficiently?
For the `SparseRandomProjection` transformer, computing the pseudo-inverse breaks sparsity. Perhaps there's a way to generate a sparse matrix that is "close enough" rather than using the pseudo-inverse?
In short: if there's a Random Projection expert in the room, please speak up!
That said, it seems to work fine now, so performance improvements could be pushed in follow-up PRs.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/random_projection.py]
(definition of BaseRandomProjection._compute_inverse_components:)
def _compute_inverse_components(self):
"""Compute the pseudo-inverse of the (densified) components."""
(definition of BaseRandomProjection.inverse_transform:)
def inverse_transform(self, X):
"""Project data back to its original space.
Returns an array X_original whose transform would be X. Note that even
if X is sparse, X_original is dense: this may use a lot of RAM.
If `compute_inverse_components` is False, the inverse of the components is
computed during each call to `inverse_transform` which can be costly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_components)
Data to be transformed back.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data."""
[end of new definitions in sklearn/random_projection.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add inverse_transform() to random projection classes
### Describe the workflow you want to enable
`GaussianRandomProjection` and `SparseRandomProjection` should have an `inverse_transform()` method.
### Describe your proposed solution
Simply add the `inverse_transform()` method, with the appropriate input validation and tests, and return `X @ np.linalg.pinv(self.components_.T)`.
### Describe alternatives you've considered, if relevant
N/A
### Additional context
My understanding is that every transformer that performs a reversible transformation should have an `inverse_transform()` method, unless there's a really good reason not to (e.g., if it's terribly inefficient, or there's a better alternative). Was the `inverse_transform()` left out because it's too inefficient?
----------
As computing the inverse is costly, it probably should be done in the `fit()` method, instead of every time we call `inverse_transform()`. But since it's not needed that often, I guess it should only be computed if an `inverse` hyperparameter is set to `True` (and the default would be `False`).
I guess `np.linalg.pinv(self.components_.T)` can be cached/stored the first time `inverse_transform` is called.
I suppose it could be, but I thought the usual approach was to learn stuff in `fit()`, similar to how `probability=True` in SVC makes it learn probas and adds the `predict_proba()` method. WDYT?
true, you could open a PR with that approach.
--------------------
</issues> | 38ff5be25d0164bf9598bcfdde3b791ad6e261b0 |
Textualize__rich-1706 | 1,706 | Textualize/rich | null | 008854c40772f647dfcb873bc3489e8a1c02d598 | 2021-11-17T21:21:55Z | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 77dd5a0a9b..2b1dc69d04 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Allowed `__rich__` to work recursively
- Allowed Text classes to work with sep in print https://github.com/willmcgugan/rich/issues/1689
+### Added
+
+- Added a `rich.text.Text.from_ansi` helper method for handling pre-formatted input strings https://github.com/willmcgugan/rich/issues/1670
+
## [10.13.0] - 2021-11-07
### Added
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 0e299be6e7..557f548e8d 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -22,4 +22,5 @@ The following people have contributed to the development of Rich:
- [Clément Robert](https://github.com/neutrinoceros)
- [Tushar Sadhwani](https://github.com/tusharsadhwani)
- [Tim Savage](https://github.com/timsavage)
+- [Nicolas Simonds](https://github.com/0xDEC0DE)
- [Gabriele N. Tornetta](https://github.com/p403n1x87)
diff --git a/docs/source/text.rst b/docs/source/text.rst
index b5f2eaf8f1..142b0639e1 100644
--- a/docs/source/text.rst
+++ b/docs/source/text.rst
@@ -26,6 +26,11 @@ Alternatively, you can construct styled text by calling :meth:`~rich.text.Text.a
text.append(" World!")
console.print(text)
+If you would like to use text that is already formatted with ANSI codes, call :meth:`~rich.text.Text.from_ansi` to convert it to a ``Text`` object:
+
+ text = Text.from_ansi("\033[1mHello, World!\033[0m")
+ console.print(text.spans)
+
Since building Text instances from parts is a common requirement, Rich offers :meth:`~rich.text.Text.assemble` which will combine strings or pairs of string and Style, and return a Text instance. The follow example is equivalent to the code above::
text = Text.assemble(("Hello", "bold magenta"), " World!")
diff --git a/rich/text.py b/rich/text.py
index 52ecd16042..c49e152b60 100644
--- a/rich/text.py
+++ b/rich/text.py
@@ -242,6 +242,40 @@ def from_markup(
rendered_text.overflow = overflow
return rendered_text
+ @classmethod
+ def from_ansi(
+ cls,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: Optional[int] = 8,
+ ) -> "Text":
+ """Create a Text object from pre-formatted ANSI.
+
+ Args:
+ text (str): A string containing ANSI color codes.
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ """
+ from .ansi import AnsiDecoder
+
+ decoded_text = AnsiDecoder().decode_line(text)
+ decoded_text.justify = justify
+ decoded_text.overflow = overflow
+ decoded_text.no_wrap = no_wrap
+ decoded_text.end = end
+ decoded_text.tab_size = tab_size
+ decoded_text.stylize(style)
+ return decoded_text
+
@classmethod
def styled(
cls,
| diff --git a/tests/test_text.py b/tests/test_text.py
index 3727d1602c..6eecb9ee73 100644
--- a/tests/test_text.py
+++ b/tests/test_text.py
@@ -95,6 +95,15 @@ def test_from_markup():
assert text._spans == [Span(7, 13, "bold")]
+def test_from_ansi():
+ text = Text.from_ansi("Hello, \033[1mWorld!\033[0m")
+ text2 = Text.from_ansi("Hello, \033[1mWorld!\033[0m", style="red")
+ assert str(text) == "Hello, World!"
+ assert text._spans == [Span(7, 13, Style(bold=True))]
+ assert str(text2) == "Hello, World!"
+ assert text2._spans == [Span(7, 13, Style(bold=True)), Span(0, 13, "red")]
+
+
def test_copy():
test = Text()
test.append("Hello", "bold")
| diff --git a/CHANGELOG.md b/CHANGELOG.md
index 77dd5a0a9b..2b1dc69d04 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Allowed `__rich__` to work recursively
- Allowed Text classes to work with sep in print https://github.com/willmcgugan/rich/issues/1689
+### Added
+
+- Added a `rich.text.Text.from_ansi` helper method for handling pre-formatted input strings https://github.com/willmcgugan/rich/issues/1670
+
## [10.13.0] - 2021-11-07
### Added
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 0e299be6e7..557f548e8d 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -22,4 +22,5 @@ The following people have contributed to the development of Rich:
- [Clément Robert](https://github.com/neutrinoceros)
- [Tushar Sadhwani](https://github.com/tusharsadhwani)
- [Tim Savage](https://github.com/timsavage)
+- [Nicolas Simonds](https://github.com/0xDEC0DE)
- [Gabriele N. Tornetta](https://github.com/p403n1x87)
diff --git a/docs/source/text.rst b/docs/source/text.rst
index b5f2eaf8f1..142b0639e1 100644
--- a/docs/source/text.rst
+++ b/docs/source/text.rst
@@ -26,6 +26,11 @@ Alternatively, you can construct styled text by calling :meth:`~rich.text.Text.a
text.append(" World!")
console.print(text)
+If you would like to use text that is already formatted with ANSI codes, call :meth:`~rich.text.Text.from_ansi` to convert it to a ``Text`` object:
+
+ text = Text.from_ansi("\033[1mHello, World!\033[0m")
+ console.print(text.spans)
+
Since building Text instances from parts is a common requirement, Rich offers :meth:`~rich.text.Text.assemble` which will combine strings or pairs of string and Style, and return a Text instance. The follow example is equivalent to the code above::
text = Text.assemble(("Hello", "bold magenta"), " World!")
| [
{
"components": [
{
"doc": "Create a Text object from pre-formatted ANSI.\n\nArgs:\n text (str): A string containing ANSI color codes.\n style (Union[str, Style], optional): Base style for text. Defaults to \"\".\n justify (str, optional): Justify method: \"left\", \"center\", \"full\", \... | [
"tests/test_text.py::test_from_ansi"
] | [
"tests/test_text.py::test_span",
"tests/test_text.py::test_span_split",
"tests/test_text.py::test_span_move",
"tests/test_text.py::test_span_right_crop",
"tests/test_text.py::test_len",
"tests/test_text.py::test_cell_len",
"tests/test_text.py::test_bool",
"tests/test_text.py::test_str",
"tests/test_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add a Text.from_ansi helper method
Add a simple little helper to run `AnsiDecoder.decode_line` over "pre-cooked" inputs.
Fixes issue: #1670
## Type of changes
- [x] Bug fix
- [x] New feature
- [x] Documentation / docstrings
- [x] Tests
## Checklist
- [x] I've run the latest [black](https://github.com/psf/black) with default args on new code.
- [x] I've updated CHANGELOG.md and CONTRIBUTORS.md where appropriate.
- [x] I've added tests for new code.
- [x] I accept that @willmcgugan may be pedantic in the code review.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rich/text.py]
(definition of Text.from_ansi:)
def from_ansi( cls, text: str, *, style: Union[str, Style] = "", justify: Optional["JustifyMethod"] = None, overflow: Optional["OverflowMethod"] = None, no_wrap: Optional[bool] = None, end: str = "\n", tab_size: Optional[int] = 8, ) -> "Text":
"""Create a Text object from pre-formatted ANSI.
Args:
text (str): A string containing ANSI color codes.
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8."""
[end of new definitions in rich/text.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | b0661de34bab35af9b4b1d3ba8e28b186b225e84 | |
sqlfluff__sqlfluff-1917 | 1,917 | sqlfluff/sqlfluff | 0.6 | b5d67f1c615d6addd9169845617979d4155591fe | 2021-11-16T21:03:18Z | diff --git a/src/sqlfluff/core/default_config.cfg b/src/sqlfluff/core/default_config.cfg
index 7aa705fa2b6..1805c701b57 100644
--- a/src/sqlfluff/core/default_config.cfg
+++ b/src/sqlfluff/core/default_config.cfg
@@ -93,3 +93,6 @@ prefer_count_0 = False
[sqlfluff:rules:L052] # Semi-colon formatting approach.
semicolon_newline = False
require_final_semicolon = False
+
+[sqlfluff:rules:L054] # GROUP BY/ORDER BY column references.
+group_by_and_order_by_style = consistent
diff --git a/src/sqlfluff/core/rules/config_info.py b/src/sqlfluff/core/rules/config_info.py
index 1c34b2aeb92..251ec001451 100644
--- a/src/sqlfluff/core/rules/config_info.py
+++ b/src/sqlfluff/core/rules/config_info.py
@@ -117,6 +117,13 @@
"as it can cause issues when wrapping the query within other SQL queries)"
),
},
+ "group_by_and_order_by_style": {
+ "validation": ["consistent", "implicit", "explicit"],
+ "definition": (
+ "The expectation for using explicit column name references "
+ "or implicit positional references"
+ ),
+ },
}
diff --git a/src/sqlfluff/rules/L054.py b/src/sqlfluff/rules/L054.py
new file mode 100644
index 00000000000..e04d20091a8
--- /dev/null
+++ b/src/sqlfluff/rules/L054.py
@@ -0,0 +1,143 @@
+"""Implementation of Rule L054."""
+from typing import Optional
+
+from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
+
+
+class Rule_L054(BaseRule):
+ """Inconsistent column references in GROUP BY/ORDER BY clauses.
+
+ | **Anti-pattern**
+ | A mix of implicit and explicit column references are used in a GROUP BY clause.
+
+ .. code-block:: sql
+ :force:
+
+ SELECT
+ foo,
+ bar,
+ sum(baz) AS sum_value
+ FROM fake_table
+ GROUP BY
+ foo, 2;
+
+ -- The same also applies to column
+ -- references in ORDER BY clauses.
+
+ SELECT
+ foo,
+ bar
+ FROM fake_table
+ ORDER BY
+ 1, bar;
+
+ | **Best practice**
+ | Reference all GROUP BY/ORDER BY columns either by name or by position.
+
+ .. code-block:: sql
+ :force:
+
+ -- GROUP BY: Explicit
+ SELECT
+ foo,
+ bar,
+ sum(baz) AS sum_value
+ FROM fake_table
+ GROUP BY
+ foo, bar;
+
+ -- ORDER BY: Explicit
+ SELECT
+ foo,
+ bar
+ FROM fake_table
+ ORDER BY
+ foo, bar;
+
+ -- GROUP BY: Implicit
+ SELECT
+ foo,
+ bar,
+ sum(baz) AS sum_value
+ FROM fake_table
+ GROUP BY
+ 1, 2;
+
+ -- ORDER BY: Implicit
+ SELECT
+ foo,
+ bar
+ FROM fake_table
+ ORDER BY
+ 1, 2;
+ """
+
+ config_keywords = ["group_by_and_order_by_style"]
+
+ def _eval(self, context: RuleContext) -> Optional[LintResult]:
+ """Inconsistent column references in GROUP BY/ORDER BY clauses."""
+ # Config type hints
+ self.group_by_and_order_by_style: str
+
+ # We only care about GROUP BY/ORDER BY clauses.
+ if not context.segment.is_type("groupby_clause", "orderby_clause"):
+ return None
+
+ # Look at child segments and map column references to either the implict or explicit category.
+ # N.B. segment names are used as the numeric literal type is 'raw', so best to be specific with the name.
+ column_reference_category_map = {
+ "ColumnReferenceSegment": "explicit",
+ "ExpressionSegment": "explicit",
+ "numeric_literal": "implicit",
+ }
+ column_reference_category_set = {
+ column_reference_category_map[segment.name]
+ for segment in context.segment.segments
+ if segment.name in column_reference_category_map
+ }
+
+ if self.group_by_and_order_by_style == "consistent":
+ # If consistent naming then raise lint error if either:
+
+ if len(column_reference_category_set) > 1:
+ # 1. Both implicit and explicit column references are found in the same clause.
+ return LintResult(
+ anchor=context.segment,
+ memory=context.memory,
+ )
+ else:
+ # 2. A clause is found to contain column name references that
+ # contradict the precedent set in earlier clauses.
+ current_group_by_order_by_convention = (
+ column_reference_category_set.pop()
+ )
+ prior_group_by_order_by_convention = context.memory.get(
+ "prior_group_by_order_by_convention"
+ )
+
+ if prior_group_by_order_by_convention and (
+ prior_group_by_order_by_convention
+ != current_group_by_order_by_convention
+ ):
+ return LintResult(
+ anchor=context.segment,
+ memory=context.memory,
+ )
+
+ context.memory[
+ "prior_group_by_order_by_convention"
+ ] = current_group_by_order_by_convention
+ else:
+ # If explicit or implicit naming then raise lint error
+ # if the opposite reference type is detected.
+ if any(
+ category != self.group_by_and_order_by_style
+ for category in column_reference_category_set
+ ):
+ return LintResult(
+ anchor=context.segment,
+ memory=context.memory,
+ )
+
+ # Return memory for later clauses.
+ return LintResult(memory=context.memory)
| diff --git a/test/rules/std_L054_test.py b/test/rules/std_L054_test.py
new file mode 100644
index 00000000000..64e86c67f05
--- /dev/null
+++ b/test/rules/std_L054_test.py
@@ -0,0 +1,33 @@
+"""Tests the python routines within L054."""
+import sqlfluff
+
+
+def test__rules__std_L054_raised() -> None:
+ """Test case for multiple L054 errors raised with 'consistent' setting."""
+ sql = """
+ SELECT
+ foo,
+ bar,
+ sum(baz) AS sum_value
+ FROM (
+ SELECT
+ foo,
+ bar,
+ sum(baz) AS baz
+ FROM fake_table
+ GROUP BY
+ foo, bar
+ )
+ GROUP BY
+ 1, 2
+ ORDER BY
+ 1, 2;
+ """
+ result = sqlfluff.lint(sql)
+
+ results_l054 = [r for r in result if r["code"] == "L054"]
+ assert len(results_l054) == 2
+ assert (
+ results_l054[0]["description"]
+ == "Inconsistent column references in GROUP BY/ORDER BY clauses."
+ )
| diff --git a/src/sqlfluff/core/default_config.cfg b/src/sqlfluff/core/default_config.cfg
index 7aa705fa2b6..1805c701b57 100644
--- a/src/sqlfluff/core/default_config.cfg
+++ b/src/sqlfluff/core/default_config.cfg
@@ -93,3 +93,6 @@ prefer_count_0 = False
[sqlfluff:rules:L052] # Semi-colon formatting approach.
semicolon_newline = False
require_final_semicolon = False
+
+[sqlfluff:rules:L054] # GROUP BY/ORDER BY column references.
+group_by_and_order_by_style = consistent
| [
{
"components": [
{
"doc": "Inconsistent column references in GROUP BY/ORDER BY clauses.\n\n| **Anti-pattern**\n| A mix of implicit and explicit column references are used in a GROUP BY clause.\n\n.. code-block:: sql\n :force:\n\n SELECT\n foo,\n bar,\n sum(baz) AS sum_va... | [
"test/rules/std_L054_test.py::test__rules__std_L054_raised"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
New Rule L054: Inconsistent column references in GROUP BY/ORDER BY clauses.
<!--Firstly, thanks for adding this feature! Secondly, please check the key steps against the checklist below to make your contribution easy to merge.-->
<!--Please give the Pull Request a meaningful title (including the dialect this PR is for if it is dialect specific), as this will automatically be added to the release notes, and then the Change Log.-->
### Brief summary of the change made
<!--If there is an open issue for this, then please include `fixes #XXXX` or `closes #XXXX` replacing `XXXX` with the issue number and it will automatically close the issue when the pull request is merged. Alternatively if not fully closed you can say `makes progress on #XXXX` to create a link on that issue without closing it.-->
This PR adds a new rule to prevent the use of implicit positional references in GROUP BY clauses.

N.B. this rule does not provide an automatic fix (reasons discussed below)
### Are there any other side effects of this change that we should be aware of?
No
### Pull Request checklist
- [X] Please confirm you have completed any of the necessary steps below.
- Included test cases to demonstrate any code changes, which may be one or more of the following:
- `.yml` rule test cases in `test/fixtures/rules/std_rule_cases`.
- `.sql`/`.yml` parser test cases in `test/fixtures/dialects` (note YML files can be auto generated with `python test/generate_parse_fixture_yml.py` or by running `tox` locally).
- Full autofix test cases in `test/fixtures/linter/autofix`.
- Other.
- Added appropriate documentation for the change.
- Created GitHub issues for any relevant followup/future enhancements if appropriate.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/sqlfluff/rules/L054.py]
(definition of Rule_L054:)
class Rule_L054(BaseRule):
"""Inconsistent column references in GROUP BY/ORDER BY clauses.
| **Anti-pattern**
| A mix of implicit and explicit column references are used in a GROUP BY clause.
.. code-block:: sql
:force:
SELECT
foo,
bar,
sum(baz) AS sum_value
FROM fake_table
GROUP BY
foo, 2;
-- The same also applies to column
-- references in ORDER BY clauses.
SELECT
foo,
bar
FROM fake_table
ORDER BY
1, bar;
| **Best practice**
| Reference all GROUP BY/ORDER BY columns either by name or by position.
.. code-block:: sql
:force:
-- GROUP BY: Explicit
SELECT
foo,
bar,
sum(baz) AS sum_value
FROM fake_table
GROUP BY
foo, bar;
-- ORDER BY: Explicit
SELECT
foo,
bar
FROM fake_table
ORDER BY
foo, bar;
-- GROUP BY: Implicit
SELECT
foo,
bar,
sum(baz) AS sum_value
FROM fake_table
GROUP BY
1, 2;
-- ORDER BY: Implicit
SELECT
foo,
bar
FROM fake_table
ORDER BY
1, 2;"""
(definition of Rule_L054._eval:)
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Inconsistent column references in GROUP BY/ORDER BY clauses."""
[end of new definitions in src/sqlfluff/rules/L054.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | d3c40ff6539bdb512211ebf137d7fe3ebc9b585c | |
pylint-dev__pylint-5315 | 5,315 | pylint-dev/pylint | 2.11 | 0089cf3fd2d53b3c547e873bd9f728244d2c594b | 2021-11-15T21:29:43Z | diff --git a/ChangeLog b/ChangeLog
index 4a8b2eb81a..478b5587a6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -70,6 +70,9 @@ Release date: TBA
Closes #3688
+* Added the ``--enable-all-extensions`` command line option. It will load all available extensions
+ which can be listed by running ``--list-extensions``
+
* Fix bug with importing namespace packages with relative imports
Closes #2967 and #5131
diff --git a/doc/whatsnew/2.12.rst b/doc/whatsnew/2.12.rst
index 97957d7c58..f39c3f56f9 100644
--- a/doc/whatsnew/2.12.rst
+++ b/doc/whatsnew/2.12.rst
@@ -191,3 +191,6 @@ Other Changes
Closes #5171
Follow-up in #5065
+
+* Added the ``--enable-all-extensions`` command line option. It will load all available extensions
+ which can be listed by running ``--list-extensions``
diff --git a/pylint/lint/run.py b/pylint/lint/run.py
index 46710dc2b1..ff2e6a2434 100644
--- a/pylint/lint/run.py
+++ b/pylint/lint/run.py
@@ -93,6 +93,7 @@ def __init__(
"init-hook": (cb_init_hook, True),
"rcfile": (self.cb_set_rcfile, True),
"load-plugins": (self.cb_add_plugins, True),
+ "enable-all-extensions": (self.cb_enable_all_extensions, False),
"verbose": (self.cb_verbose_mode, False),
"output": (self.cb_set_output, True),
},
@@ -258,6 +259,15 @@ def __init__(
"will be displayed.",
},
),
+ (
+ "enable-all-extensions",
+ {
+ "action": "callback",
+ "callback": self.cb_enable_all_extensions,
+ "help": "Load and enable all available extensions. "
+ "Use --list-extensions to see a list all available extensions.",
+ },
+ ),
),
option_groups=self.option_groups,
pylintrc=self._rcfile,
@@ -475,3 +485,17 @@ def cb_list_groups(self, *args, **kwargs):
def cb_verbose_mode(self, *args, **kwargs):
self.verbose = True
+
+ def cb_enable_all_extensions(self, option_name: str, value: None) -> None:
+ """Callback to load and enable all available extensions"""
+ for filename in os.listdir(os.path.dirname(extensions.__file__)):
+ # pylint: disable=fixme
+ # TODO: Remove the check for deprecated check_docs after the extension has been removed
+ if (
+ filename.endswith(".py")
+ and not filename.startswith("_")
+ and not filename.startswith("check_docs")
+ ):
+ extension_name = f"pylint.extensions.{filename[:-3]}"
+ if extension_name not in self._plugins:
+ self._plugins.append(extension_name)
| diff --git a/tests/test_self.py b/tests/test_self.py
index a4a3ac7940..8441a5acb0 100644
--- a/tests/test_self.py
+++ b/tests/test_self.py
@@ -58,7 +58,7 @@
import pytest
from py._path.local import LocalPath # type: ignore
-from pylint import modify_sys_path
+from pylint import extensions, modify_sys_path
from pylint.constants import MAIN_CHECKER_NAME, MSG_TYPES_STATUS
from pylint.lint import Run
from pylint.lint.pylinter import PyLinter
@@ -1237,3 +1237,25 @@ def test_output_file_specified_in_rcfile(self, tmpdir: LocalPath) -> None:
output_file,
expected_output=expected,
)
+
+ @staticmethod
+ def test_enable_all_extensions() -> None:
+ """Test to see if --enable-all-extensions does indeed load all extensions"""
+ # Record all extensions
+ plugins = []
+ for filename in os.listdir(os.path.dirname(extensions.__file__)):
+ # pylint: disable=fixme
+ # TODO: Remove the check for deprecated check_docs after the extension has been removed
+ if (
+ filename.endswith(".py")
+ and not filename.startswith("_")
+ and not filename.startswith("check_docs")
+ ):
+ plugins.append(f"pylint.extensions.{filename[:-3]}")
+
+ # Check if they are loaded
+ runner = Run(
+ ["--enable-all-extensions", join(HERE, "regrtest_data", "empty.py")],
+ exit=False,
+ )
+ assert sorted(plugins) == sorted(runner.linter._dynamic_plugins)
| diff --git a/ChangeLog b/ChangeLog
index 4a8b2eb81a..478b5587a6 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -70,6 +70,9 @@ Release date: TBA
Closes #3688
+* Added the ``--enable-all-extensions`` command line option. It will load all available extensions
+ which can be listed by running ``--list-extensions``
+
* Fix bug with importing namespace packages with relative imports
Closes #2967 and #5131
diff --git a/doc/whatsnew/2.12.rst b/doc/whatsnew/2.12.rst
index 97957d7c58..f39c3f56f9 100644
--- a/doc/whatsnew/2.12.rst
+++ b/doc/whatsnew/2.12.rst
@@ -191,3 +191,6 @@ Other Changes
Closes #5171
Follow-up in #5065
+
+* Added the ``--enable-all-extensions`` command line option. It will load all available extensions
+ which can be listed by running ``--list-extensions``
| [
{
"components": [
{
"doc": "Callback to load and enable all available extensions",
"lines": [
489,
501
],
"name": "Run.cb_enable_all_extensions",
"signature": "def cb_enable_all_extensions(self, option_name: str, value: None) -> None:",
"... | [
"tests/test_self.py::TestRunTC::test_enable_all_extensions"
] | [
"tests/test_self.py::TestRunTC::test_pkginfo",
"tests/test_self.py::TestRunTC::test_all",
"tests/test_self.py::TestRunTC::test_no_ext_file",
"tests/test_self.py::TestRunTC::test_w0704_ignored",
"tests/test_self.py::TestRunTC::test_exit_zero",
"tests/test_self.py::TestRunTC::test_generate_config_option",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add ``enable-all-extensions`` option
- [x] Add yourself to CONTRIBUTORS if you are a new contributor.
- [x] Add a ChangeLog entry describing what your PR does.
- [x] If it's a new feature, or an important bug fix, add a What's New entry in
`doc/whatsnew/<current release.rst>`.
- [x] Write a good description on what the PR does.
## Type of Changes
| | Type |
| --- | ---------------------- |
| ✓ | :sparkles: New feature |
## Description
@Pierre-Sassoulas 👀
Might as well put it in `2.12`.
Partial of #5306.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in pylint/lint/run.py]
(definition of Run.cb_enable_all_extensions:)
def cb_enable_all_extensions(self, option_name: str, value: None) -> None:
"""Callback to load and enable all available extensions"""
[end of new definitions in pylint/lint/run.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 0089cf3fd2d53b3c547e873bd9f728244d2c594b | |
RDFLib__rdflib-1463 | 1,463 | RDFLib/rdflib | null | a24d5349863254c07476b71aa7554a745c217abf | 2021-11-15T11:48:21Z | diff --git a/rdflib/parser.py b/rdflib/parser.py
index f0014150f..f7fb4127a 100644
--- a/rdflib/parser.py
+++ b/rdflib/parser.py
@@ -19,7 +19,6 @@
from urllib.request import Request
from urllib.request import url2pathname
-from urllib.parse import urljoin
from urllib.request import urlopen
from urllib.error import HTTPError
@@ -35,6 +34,7 @@
"StringInputSource",
"URLInputSource",
"FileInputSource",
+ "PythonInputSource",
]
@@ -104,6 +104,45 @@ def close(self):
pass
+class PythonInputSource(InputSource):
+ """
+ Constructs an RDFLib Parser InputSource from a Python data structure,
+ for example, loaded from JSON with json.load or json.loads:
+
+ >>> import json
+ >>> as_string = \"\"\"{
+ ... "@context" : {"ex" : "http://example.com/ns#"},
+ ... "@graph": [{"@type": "ex:item", "@id": "#example"}]
+ ... }\"\"\"
+ >>> as_python = json.loads(as_string)
+ >>> source = create_input_source(data=as_python)
+ >>> isinstance(source, PythonInputSource)
+ True
+ """
+
+ def __init__(self, data, system_id=None):
+ self.content_type = None
+ self.auto_close = False # see Graph.parse(), true if opened by us
+ self.public_id = None
+ self.system_id = system_id
+ self.data = data
+
+ def getPublicId(self):
+ return self.public_id
+
+ def setPublicId(self, public_id):
+ self.public_id = public_id
+
+ def getSystemId(self):
+ return self.system_id
+
+ def setSystemId(self, system_id):
+ self.system_id = system_id
+
+ def close(self):
+ self.data = None
+
+
class StringInputSource(InputSource):
"""
Constructs an RDFLib Parser InputSource from a Python String or Bytes
@@ -289,10 +328,15 @@ def create_input_source(
input_source = FileInputSource(file)
if data is not None:
- if not isinstance(data, (str, bytes, bytearray)):
- raise RuntimeError("parse data can only str, or bytes.")
- input_source = StringInputSource(data)
- auto_close = True
+ if isinstance(data, dict):
+ input_source = PythonInputSource(data)
+ auto_close = True
+ elif isinstance(data, (str, bytes, bytearray)):
+ input_source = StringInputSource(data)
+ auto_close = True
+ else:
+ raise RuntimeError(
+ f"parse data can only str, or bytes. not: {type(data)}")
if input_source is None:
raise Exception("could not create InputSource")
diff --git a/rdflib/plugins/shared/jsonld/util.py b/rdflib/plugins/shared/jsonld/util.py
index bd4c06aa7..392c822e0 100644
--- a/rdflib/plugins/shared/jsonld/util.py
+++ b/rdflib/plugins/shared/jsonld/util.py
@@ -17,12 +17,16 @@
from urllib.parse import urljoin, urlsplit, urlunsplit
-from rdflib.parser import create_input_source
+from rdflib.parser import create_input_source, PythonInputSource
from io import StringIO
def source_to_json(source):
+
+ if isinstance(source, PythonInputSource):
+ return source.data
+
# TODO: conneg for JSON (fix support in rdflib's URLInputSource!)
source = create_input_source(source, format="json-ld")
| diff --git a/test/jsonld/test_pythonparse.py b/test/jsonld/test_pythonparse.py
new file mode 100644
index 000000000..2957cb376
--- /dev/null
+++ b/test/jsonld/test_pythonparse.py
@@ -0,0 +1,76 @@
+from rdflib import Graph
+from rdflib.compare import isomorphic
+import json
+
+
+def test_wrap():
+ """
+ Example of intercepting a JSON-LD structure and performing some
+ in-memory manipulation and then passing that structure to Graph.parse
+ lists in the shacl graph.
+ """
+
+ _data = """
+ {
+ "@context" : {
+ "ngff" : "http://example.com/ns#"
+ },
+ "@graph": [{
+ "@type": "ngff:ItemList",
+ "ngff:collectionType": {"@type": "ngff:Image"},
+ "ngff:itemListElement": [
+ {
+ "@type": "ngff:Image",
+ "path": "image1",
+ "name": "Image 1"
+ },
+ {
+ "@type": "ngff:Image",
+ "path": "something-else",
+ "name": "bob"
+ }
+ ]
+ }]
+ }
+ """
+
+ # Current workaround
+ data = json.loads(_data)
+ data = walk(data)
+ data = json.dumps(data) # wasteful
+ g1 = Graph()
+ g1.parse(data=data, format="json-ld")
+
+ # Desired behavior
+ data = json.loads(_data)
+ data = walk(data)
+ g2 = Graph()
+ g2.parse(data=data, format="json-ld")
+
+ assert isomorphic(g1, g2)
+
+
+def walk(data, path=None):
+ """
+ Some arbitrary operation on a Python data structure.
+ """
+
+ if path is None:
+ path = []
+
+ if isinstance(data, dict):
+ for k, v in data.items():
+ data[k] = walk(v, path + [k])
+
+ elif isinstance(data, list):
+ replacement = list()
+ for idx, item in enumerate(data):
+ if path[-1] == "@graph":
+ replacement.append(walk(item, path))
+ else:
+ wrapper = {"@type": "ListItemWrapper", "ngff:position": idx}
+ wrapper["ngff:item"] = walk(item, path + [idx])
+ replacement.append(wrapper)
+ data = replacement
+
+ return data
| [
{
"components": [
{
"doc": "Constructs an RDFLib Parser InputSource from a Python data structure,\nfor example, loaded from JSON with json.load or json.loads:\n\n>>> import json\n>>> as_string = \"\"\"{\n... \"@context\" : {\"ex\" : \"http://example.com/ns#\"},\n... \"@graph\": [{\"@type\": \"... | [
"test/jsonld/test_pythonparse.py::test_wrap"
] | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
RFC: Add PythonInputSource to create py-based graphs
In order to manipulate a JSON-LD structure before creating a graph (specifically to simplify rdf:Seq handling), it is
currently necessary to use `json.loads` followed by `dumps` and then let `Graph().parse()` re-load. By detecting `dict`
instances and creating a `PythonInputSource`, a single call to `loads` suffices.
## Proposed Changes
- add rdflib.parser.PythonInputSource
- return instances from create_input_source
- detect instances in rdflib.plugins.shares.jsonld.util.source_to_json
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in rdflib/parser.py]
(definition of PythonInputSource:)
class PythonInputSource(InputSource):
"""Constructs an RDFLib Parser InputSource from a Python data structure,
for example, loaded from JSON with json.load or json.loads:
>>> import json
>>> as_string = """{
... "@context" : {"ex" : "http://example.com/ns#"},
... "@graph": [{"@type": "ex:item", "@id": "#example"}]
... }"""
>>> as_python = json.loads(as_string)
>>> source = create_input_source(data=as_python)
>>> isinstance(source, PythonInputSource)
True"""
(definition of PythonInputSource.__init__:)
def __init__(self, data, system_id=None):
(definition of PythonInputSource.getPublicId:)
def getPublicId(self):
(definition of PythonInputSource.setPublicId:)
def setPublicId(self, public_id):
(definition of PythonInputSource.getSystemId:)
def getSystemId(self):
(definition of PythonInputSource.setSystemId:)
def setSystemId(self, system_id):
(definition of PythonInputSource.close:)
def close(self):
[end of new definitions in rdflib/parser.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 0c11debb5178157baeac27b735e49a757916d2a6 | ||
sympy__sympy-22476 | 22,476 | sympy/sympy | 1.10 | eb0368b128ffd1a418f977b57ed1ba7b9c0d696c | 2021-11-12T17:48:33Z | diff --git a/sympy/matrices/expressions/matexpr.py b/sympy/matrices/expressions/matexpr.py
index 43d7164b5b09..78db834466e6 100644
--- a/sympy/matrices/expressions/matexpr.py
+++ b/sympy/matrices/expressions/matexpr.py
@@ -12,7 +12,7 @@
from sympy.functions import conjugate, adjoint
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices.common import NonSquareMatrixError
-from sympy.matrices.matrices import MatrixKind
+from sympy.matrices.matrices import MatrixKind, MatrixBase
from sympy.multipledispatch import dispatch
from sympy.simplify import simplify
from sympy.utilities.misc import filldedent
@@ -512,7 +512,26 @@ def _postprocessor(expr):
}
-def _matrix_derivative(expr, x):
+def _matrix_derivative(expr, x, old_algorithm=False):
+
+ if isinstance(expr, MatrixBase) or isinstance(x, MatrixBase):
+ # Do not use array expressions for explicit matrices:
+ old_algorithm = True
+
+ if old_algorithm:
+ return _matrix_derivative_old_algorithm(expr, x)
+
+ from sympy.tensor.array.expressions.conv_matrix_to_array import convert_matrix_to_array
+ from sympy.tensor.array.expressions.arrayexpr_derivatives import array_derive
+ from sympy.tensor.array.expressions.conv_array_to_matrix import convert_array_to_matrix
+
+ array_expr = convert_matrix_to_array(expr)
+ diff_array_expr = array_derive(array_expr, x)
+ diff_matrix_expr = convert_array_to_matrix(diff_array_expr)
+ return diff_matrix_expr
+
+
+def _matrix_derivative_old_algorithm(expr, x):
from sympy.tensor.array.array_derivatives import ArrayDerivative
lines = expr._eval_derivative_matrix_lines(x)
| diff --git a/sympy/matrices/expressions/tests/test_derivatives.py b/sympy/matrices/expressions/tests/test_derivatives.py
index 4a8a64776540..e60fddebe582 100644
--- a/sympy/matrices/expressions/tests/test_derivatives.py
+++ b/sympy/matrices/expressions/tests/test_derivatives.py
@@ -3,6 +3,7 @@
http://www.math.uwaterloo.ca/~hwolkowi//matrixcookbook.pdf
"""
+from sympy.combinatorics import Permutation
from sympy.concrete.summations import Sum
from sympy.core.numbers import Rational
from sympy.core.singleton import S
@@ -23,6 +24,7 @@
from sympy.matrices.expressions.special import (Identity, ZeroMatrix)
from sympy.tensor.array.array_derivatives import ArrayDerivative
from sympy.matrices.expressions import hadamard_power
+from sympy.tensor.array.expressions.array_expressions import ArrayAdd, ArrayTensorProduct, PermuteDims
k = symbols("k")
i, j = symbols("i j")
@@ -85,11 +87,13 @@ def test_matrix_derivative_by_scalar():
def test_matrix_derivative_non_matrix_result():
# This is a 4-dimensional array:
- assert A.diff(A) == ArrayDerivative(A, A)
- assert A.T.diff(A) == ArrayDerivative(A.T, A)
- assert (2*A).diff(A) == ArrayDerivative(2*A, A)
- assert MatAdd(A, A).diff(A) == ArrayDerivative(MatAdd(A, A), A)
- assert (A + B).diff(A) == ArrayDerivative(A + B, A) # TODO: `B` can be removed.
+ I = Identity(k)
+ AdA = PermuteDims(ArrayTensorProduct(I, I), Permutation(3)(1, 2))
+ assert A.diff(A) == AdA
+ assert A.T.diff(A) == PermuteDims(ArrayTensorProduct(I, I), Permutation(3)(1, 2, 3))
+ assert (2*A).diff(A) == PermuteDims(ArrayTensorProduct(2*I, I), Permutation(3)(1, 2))
+ assert MatAdd(A, A).diff(A) == ArrayAdd(AdA, AdA)
+ assert (A + B).diff(A) == AdA
def test_matrix_derivative_trivial_cases():
@@ -172,7 +176,8 @@ def test_matrix_derivative_vectors_and_scalars():
def test_matrix_derivatives_of_traces():
expr = Trace(A)*A
- assert expr.diff(A) == ArrayDerivative(Trace(A)*A, A)
+ I = Identity(k)
+ assert expr.diff(A) == ArrayAdd(ArrayTensorProduct(I, A), PermuteDims(ArrayTensorProduct(Trace(A)*I, I), Permutation(3)(1, 2)))
assert expr[i, j].diff(A[m, n]).doit() == (
KDelta(i, m)*KDelta(j, n)*Trace(A) +
KDelta(m, n)*A[i, j]
@@ -317,7 +322,7 @@ def test_matrix_derivatives_of_traces():
def test_derivatives_of_complicated_matrix_expr():
expr = a.T*(A*X*(X.T*B + X*A) + B.T*X.T*(a*b.T*(X*D*X.T + X*(X.T*B + A*X)*D*B - X.T*C.T*A)*B + B*(X*D.T + B*A*X*A.T - 3*X*D))*B + 42*X*B*X.T*A.T*(X + X.T))*b
- result = (B*(B*A*X*A.T - 3*X*D + X*D.T) + a*b.T*(X*(A*X + X.T*B)*D*B + X*D*X.T - X.T*C.T*A)*B)*B*b*a.T*B.T + B**2*b*a.T*B.T*X.T*a*b.T*X*D + 42*A*X*B.T*X.T*a*b.T + B*D*B**3*b*a.T*B.T*X.T*a*b.T*X + B*b*a.T*A*X + 42*a*b.T*(X + X.T)*A*X*B.T + b*a.T*X*B*a*b.T*B.T**2*X*D.T + b*a.T*X*B*a*b.T*B.T**3*D.T*(B.T*X + X.T*A.T) + 42*b*a.T*X*B*X.T*A.T + 42*A.T*(X + X.T)*b*a.T*X*B + A.T*B.T**2*X*B*a*b.T*B.T*A + A.T*a*b.T*(A.T*X.T + B.T*X) + A.T*X.T*b*a.T*X*B*a*b.T*B.T**3*D.T + B.T*X*B*a*b.T*B.T*D - 3*B.T*X*B*a*b.T*B.T*D.T - C.T*A*B**2*b*a.T*B.T*X.T*a*b.T + X.T*A.T*a*b.T*A.T
+ result = (B*(B*A*X*A.T - 3*X*D + X*D.T) + a*b.T*(X*(A*X + X.T*B)*D*B + X*D*X.T - X.T*C.T*A)*B)*B*b*a.T*B.T + B**2*b*a.T*B.T*X.T*a*b.T*X*D + 42*A*X*B.T*X.T*a*b.T + B*D*B**3*b*a.T*B.T*X.T*a*b.T*X + B*b*a.T*A*X + a*b.T*(42*X + 42*X.T)*A*X*B.T + b*a.T*X*B*a*b.T*B.T**2*X*D.T + b*a.T*X*B*a*b.T*B.T**3*D.T*(B.T*X + X.T*A.T) + 42*b*a.T*X*B*X.T*A.T + A.T*(42*X + 42*X.T)*b*a.T*X*B + A.T*B.T**2*X*B*a*b.T*B.T*A + A.T*a*b.T*(A.T*X.T + B.T*X) + A.T*X.T*b*a.T*X*B*a*b.T*B.T**3*D.T + B.T*X*B*a*b.T*B.T*D - 3*B.T*X*B*a*b.T*B.T*D.T - C.T*A*B**2*b*a.T*B.T*X.T*a*b.T + X.T*A.T*a*b.T*A.T
assert expr.diff(X) == result
@@ -335,8 +340,8 @@ def test_mixed_deriv_mixed_expressions():
assert expr.diff(A) == (2*Trace(A))*Identity(k)
expr = Trace(A)*A
- # TODO: this is not yet supported:
- assert expr.diff(A) == ArrayDerivative(expr, A)
+ I = Identity(k)
+ assert expr.diff(A) == ArrayAdd(ArrayTensorProduct(I, A), PermuteDims(ArrayTensorProduct(Trace(A)*I, I), Permutation(3)(1, 2)))
expr = Trace(Trace(A)*A)
assert expr.diff(A) == (2*Trace(A))*Identity(k)
@@ -358,16 +363,16 @@ def test_derivatives_matrix_norms():
assert expr.diff(x) == x*(x.T*x)**Rational(-1, 2)
expr = (c.T*a*x.T*b)**S.Half
- assert expr.diff(x) == b/(2*sqrt(c.T*a*x.T*b))*c.T*a
+ assert expr.diff(x) == b*a.T*c/sqrt(c.T*a*x.T*b)/2
expr = (c.T*a*x.T*b)**Rational(1, 3)
- assert expr.diff(x) == b*(c.T*a*x.T*b)**Rational(-2, 3)*c.T*a/3
+ assert expr.diff(x) == b*a.T*c*(c.T*a*x.T*b)**Rational(-2, 3)/3
expr = (a.T*X*b)**S.Half
assert expr.diff(X) == a/(2*sqrt(a.T*X*b))*b.T
expr = d.T*x*(a.T*X*b)**S.Half*y.T*c
- assert expr.diff(X) == a*d.T*x/(2*sqrt(a.T*X*b))*y.T*c*b.T
+ assert expr.diff(X) == a/(2*sqrt(a.T*X*b))*x.T*d*y.T*c*b.T
def test_derivatives_elementwise_applyfunc():
@@ -424,7 +429,7 @@ def test_derivatives_elementwise_applyfunc():
expr = a.T*A*X.applyfunc(sin)*B*b
assert expr.diff(X).dummy_eq(
- DiagMatrix(A.T*a)*X.applyfunc(cos)*DiagMatrix(B*b))
+ HadamardProduct(A.T * a * b.T * B.T, X.applyfunc(cos)))
expr = a.T * (A*X.applyfunc(sin)*B).applyfunc(log) * b
# TODO: wrong
@@ -443,7 +448,7 @@ def test_derivatives_of_hadamard_expressions():
assert expr.diff(x) == DiagMatrix(hadamard_product(b, a))
expr = a.T*hadamard_product(A, X, B)*b
- assert expr.diff(X) == DiagMatrix(a)*hadamard_product(B, A)*DiagMatrix(b)
+ assert expr.diff(X) == HadamardProduct(a*b.T, A, B)
# Hadamard Power
@@ -460,4 +465,4 @@ def test_derivatives_of_hadamard_expressions():
assert expr.diff(X) == 2*a*a.T*X*b*b.T
expr = hadamard_power(a.T*X*b, S.Half)
- assert expr.diff(X) == a/2*hadamard_power(a.T*X*b, Rational(-1, 2))*b.T
+ assert expr.diff(X) == a/(2*sqrt(a.T*X*b))*b.T
| [
{
"components": [
{
"doc": "",
"lines": [
534,
578
],
"name": "_matrix_derivative_old_algorithm",
"signature": "def _matrix_derivative_old_algorithm(expr, x):",
"type": "function"
},
{
"doc": "",
"lines": [
... | [
"test_matrix_derivative_non_matrix_result",
"test_matrix_derivatives_of_traces",
"test_derivatives_of_complicated_matrix_expr",
"test_mixed_deriv_mixed_expressions",
"test_derivatives_matrix_norms",
"test_derivatives_elementwise_applyfunc"
] | [
"test_matrix_derivative_by_scalar",
"test_matrix_derivative_trivial_cases",
"test_matrix_derivative_with_inverse",
"test_matrix_derivative_vectors_and_scalars"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Matrix expression derivatives are now computed with the array expressions module
<!-- BEGIN RELEASE NOTES -->
- matrices
- Derivatives of matrix expressions are now computed in the array expressions module. The result is therefore able to represent higher dimensional arrays. If trivial dimensions are present, an attempt is made at converting the resulting array expression back to matrix expression.
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/matrices/expressions/matexpr.py]
(definition of _matrix_derivative_old_algorithm:)
def _matrix_derivative_old_algorithm(expr, x):
(definition of _matrix_derivative_old_algorithm._get_shape:)
def _get_shape(elem):
(definition of _matrix_derivative_old_algorithm.get_rank:)
def get_rank(parts):
(definition of _matrix_derivative_old_algorithm.contract_one_dims:)
def contract_one_dims(parts):
[end of new definitions in sympy/matrices/expressions/matexpr.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 3e8695add7a25c8d70aeba7d6137496df02863fd | ||
pydata__xarray-5981 | 5,981 | pydata/xarray | 0.20 | 126051f2bf2ddb7926a7da11b047b852d5ca6b87 | 2021-11-12T09:44:43Z | diff --git a/xarray/core/common.py b/xarray/core/common.py
index 3db9b1cfa0c..cf02bcff77b 100644
--- a/xarray/core/common.py
+++ b/xarray/core/common.py
@@ -158,6 +158,10 @@ def _repr_html_(self):
return f"<pre>{escape(repr(self))}</pre>"
return formatting_html.array_repr(self)
+ def __format__(self: Any, format_spec: str) -> str:
+ # we use numpy: scalars will print fine and arrays will raise
+ return self.values.__format__(format_spec)
+
def _iter(self: Any) -> Iterator[Any]:
for n in range(len(self)):
yield self[n]
| diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index 4bbf41c7b38..a5c044d8ea7 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -9,7 +9,7 @@
import xarray as xr
from xarray.core import formatting
-from . import requires_netCDF4
+from . import requires_dask, requires_netCDF4
class TestFormatting:
@@ -418,6 +418,26 @@ def test_array_repr_variable(self) -> None:
with xr.set_options(display_expand_data=False):
formatting.array_repr(var)
+ @requires_dask
+ def test_array_scalar_format(self) -> None:
+ var = xr.DataArray(0)
+ assert var.__format__("") == "0"
+ assert var.__format__("d") == "0"
+ assert var.__format__(".2f") == "0.00"
+
+ var = xr.DataArray([0.1, 0.2])
+ assert var.__format__("") == "[0.1 0.2]"
+ with pytest.raises(TypeError) as excinfo:
+ var.__format__(".2f")
+ assert "unsupported format string passed to" in str(excinfo.value)
+
+ # also check for dask
+ var = var.chunk(chunks={"dim_0": 1})
+ assert var.__format__("") == "[0.1 0.2]"
+ with pytest.raises(TypeError) as excinfo:
+ var.__format__(".2f")
+ assert "unsupported format string passed to" in str(excinfo.value)
+
def test_inline_variable_array_repr_custom_repr() -> None:
class CustomArray:
| [
{
"components": [
{
"doc": "",
"lines": [
161,
163
],
"name": "AbstractArray.__format__",
"signature": "def __format__(self: Any, format_spec: str) -> str:",
"type": "function"
}
],
"file": "xarray/core/common.py"
}
] | [
"xarray/tests/test_formatting.py::TestFormatting::test_array_scalar_format"
] | [
"xarray/tests/test_formatting.py::TestFormatting::test_get_indexer_at_least_n_items",
"xarray/tests/test_formatting.py::TestFormatting::test_first_n_items",
"xarray/tests/test_formatting.py::TestFormatting::test_last_n_items",
"xarray/tests/test_formatting.py::TestFormatting::test_last_item",
"xarray/tests/... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Allow string formatting of scalar DataArrays
- [x] Closes https://github.com/pydata/xarray/issues/5976
- [x] Tests added
- [x] Passes `pre-commit run --all-files`
- [ ] User visible changes (including notable bug fixes) are documented in `whats-new.rst`
This is a first try at formatting dataarray scalars. Here is the current behavior:
```python
In [1]: import xarray as xr
...: import numpy as np
In [2]: a = np.array(1)
...: da = xr.DataArray(a)
In [3]: print(a)
1
In [4]: print(da)
<xarray.DataArray ()>
array(1)
In [5]: print('{}'.format(a))
1
In [6]: print('{}'.format(da))
<xarray.DataArray ()>
array(1)
In [7]: print('{:.3f}'.format(a))
1.000
In [8]: print('{:.3f}'.format(da))
1.000
In [9]: a = np.array([1, 2])
...: da = xr.DataArray(a)
In [10]: print('{}'.format(a))
[1 2]
In [11]: print('{}'.format(da))
<xarray.DataArray (dim_0: 2)>
array([1, 2])
Dimensions without coordinates: dim_0
In [12]: print('{:.3f}'.format(a))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-12-c5afc7863e89> in <module>
----> 1 print('{:.3f}'.format(a))
TypeError: unsupported format string passed to numpy.ndarray.__format__
In [13]: print('{:.3f}'.format(da))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-bddebd8462bd> in <module>
----> 1 print('{:.3f}'.format(da))
~/disk/Dropbox/HomeDocs/git/xarray/xarray/core/common.py in __format__(self, format_spec)
162 return formatting.array_repr(self)
163 # Else why fall back to numpy
--> 164 return self.values.__format__(format_spec)
165
166 def _iter(self: Any) -> Iterator[Any]:
TypeError: unsupported format string passed to numpy.ndarray.__format__
```
I don't think there is any backwards compatibility issue but lets see if the tests pass
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in xarray/core/common.py]
(definition of AbstractArray.__format__:)
def __format__(self: Any, format_spec: str) -> str:
[end of new definitions in xarray/core/common.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 48290fa14accd3ac87768d3f73d69493b82b0be6 | ||
sqlfluff__sqlfluff-1840 | 1,840 | sqlfluff/sqlfluff | 0.6 | 5d8032d127db53fd7e2394a3ff8725de8cbe5833 | 2021-11-07T15:32:05Z | diff --git a/src/sqlfluff/rules/L050.py b/src/sqlfluff/rules/L050.py
new file mode 100644
index 00000000000..21cfff09a1f
--- /dev/null
+++ b/src/sqlfluff/rules/L050.py
@@ -0,0 +1,120 @@
+"""Implementation of Rule L050."""
+from typing import Optional
+
+from sqlfluff.core.rules.base import BaseRule, LintResult, LintFix, RuleContext
+from sqlfluff.core.rules.doc_decorators import document_fix_compatible
+
+
+@document_fix_compatible
+class Rule_L050(BaseRule):
+ """Files must not begin with newlines or whitespace.
+
+ | **Anti-pattern**
+ | The content in file begins with newlines or whitespace, the ^ represents the beginning of file.
+
+ .. code-block:: sql
+ :force:
+
+ ^
+
+ SELECT
+ a
+ FROM foo
+
+ -- Beginning on an indented line is also forbidden,
+ -- (the • represents space).
+
+ ••••SELECT
+ ••••a
+ FROM
+ ••••foo
+
+ | **Best practice**
+ | Start file on either code or comment, the ^ represents the beginning of file.
+
+ .. code-block:: sql
+ :force:
+
+
+ ^SELECT
+ a
+ FROM foo
+
+ -- Including an initial block comment.
+
+ ^/*
+ This is a description of my SQL code.
+ */
+ SELECT
+ a
+ FROM
+ foo
+
+ -- Including an initial inline comment.
+
+ ^--This is a description of my SQL code.
+ SELECT
+ a
+ FROM
+ foo
+ """
+
+ @staticmethod
+ def _potential_template_collision(context: RuleContext) -> bool:
+ """Check for any templated raw slices that intersect with source slices in the raw_stack.
+
+ Returns:
+ :obj:`bool` indicating a preceding templated raw slice has been detected.
+ """
+ templated_file = context.segment.pos_marker.templated_file
+ for segment in context.raw_stack:
+ source_slice = segment.pos_marker.source_slice
+ raw_slices = templated_file.raw_slices_spanning_source_slice(source_slice)
+ if any(
+ raw_slice
+ for raw_slice in raw_slices
+ if raw_slice.slice_type == "templated"
+ ):
+ return True
+
+ return False
+
+ def _eval(self, context: RuleContext) -> Optional[LintResult]:
+ """Files must not begin with newlines or whitespace."""
+ # If parent_stack is empty we are currently at FileSegment.
+ if len(context.parent_stack) == 0:
+ return None
+
+ # If raw_stack is empty there can be nothing to remove.
+ if len(context.raw_stack) == 0:
+ return None
+
+ # If the current segment is either comment or code and all
+ # previous segments are forms of whitespace then we can
+ # remove these earlier segments.
+ # Given the tree stucture, we make sure we are at the
+ # first leaf to avoid repeated detection.
+ whitespace_set = {"newline", "whitespace", "Indent", "Dedent"}
+ if (
+ # Non-whitespace segment.
+ (context.segment.name not in whitespace_set)
+ # We want first Non-whitespace segment so
+ # all preceding segments must be whitespace.
+ and all(segment.name in whitespace_set for segment in context.raw_stack)
+ # Found leaf of parse tree.
+ and (not context.segment.is_expandable)
+ ):
+ # It is possible that a template segment (e.g. {{ config(materialized='view') }})
+ # renders to an empty string and as such is omitted from the parsed tree.
+ # We therefore should flag if a templated raw slice intersects with the
+ # source slices in the raw stack and skip this rule to avoid risking
+ # collisions with template objects.
+ if self._potential_template_collision(context):
+ return None
+
+ return LintResult(
+ anchor=context.parent_stack[0],
+ fixes=[LintFix("delete", d) for d in context.raw_stack],
+ )
+
+ return None
| diff --git a/test/rules/std_L003_L036_L039_combo_test.py b/test/rules/std_L003_L036_L039_combo_test.py
index 70b99380d0e..01b8d36fd80 100644
--- a/test/rules/std_L003_L036_L039_combo_test.py
+++ b/test/rules/std_L003_L036_L039_combo_test.py
@@ -32,5 +32,5 @@ def test__rules__std_L003_L036_L039():
SELECT *
FROM example\n"""
- result = sqlfluff.fix(sql)
+ result = sqlfluff.fix(sql, exclude_rules=["L050"])
assert result == fixed_sql
diff --git a/test/rules/std_L050_test.py b/test/rules/std_L050_test.py
new file mode 100644
index 00000000000..4188f511b72
--- /dev/null
+++ b/test/rules/std_L050_test.py
@@ -0,0 +1,28 @@
+"""Tests the python routines within L050."""
+from sqlfluff.core import FluffConfig, Linter
+
+
+def test__rules__std_L050_no_jinja_violation_for_default_config() -> None:
+ """L050 is not raised for leading whitespace before a jinja template when using default config.
+
+ This is due to ignore_templated_areas=False in the default config.
+ """
+ sql = "\n{# I am a comment #}\nSELECT foo FROM bar\n"
+
+ cfg = FluffConfig.from_root()
+ lnt = Linter(config=cfg)
+ res = lnt.lint_string(in_str=sql)
+
+ assert len(res.violations) == 0
+
+
+def test__rules__std_L050_jinja_violation_for_custom_config() -> None:
+ """L050 is raised for leading whitespace before a jinja template when ignore_templated_areas=False."""
+ sql = "\n{# I am a comment #}\nSELECT foo FROM bar\n"
+
+ cfg = FluffConfig.from_root(overrides=dict(ignore_templated_areas=False))
+ lnt = Linter(config=cfg)
+ res = lnt.lint_string(in_str=sql)
+
+ assert len(res.violations) == 1
+ assert res.violations[0].rule.code == "L050"
| [
{
"components": [
{
"doc": "Files must not begin with newlines or whitespace.\n\n| **Anti-pattern**\n| The content in file begins with newlines or whitespace, the ^ represents the beginning of file.\n\n.. code-block:: sql\n :force:\n\n ^\n\n SELECT\n a\n FROM foo\n\n -- Beginn... | [
"test/rules/std_L050_test.py::test__rules__std_L050_jinja_violation_for_custom_config"
] | [
"test/rules/std_L003_L036_L039_combo_test.py::test__rules__std_L003_L036_L039",
"test/rules/std_L050_test.py::test__rules__std_L050_no_jinja_violation_for_default_config"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
New Rule L050: No leading whitespace
<!--Firstly, thanks for adding this feature! Secondly, please check the key steps against the checklist below to make your contribution easy to merge.-->
<!--Please give the Pull Request a meaningful title (including the dialect this PR is for if it is dialect specific), as this will automatically be added to the release notes, and then the Change Log.-->
### Brief summary of the change made
<!--If there is an open issue for this, then please include `fixes #XXXX` or `closes #XXXX` replacing `XXXX` with the issue number and it will automatically close the issue when the pull request is merged. Alternatively if not fully closed you can say `makes progress on #XXXX` to create a link on that issue without closing it.-->
This is a new rule which lints + fixes SQL beginning with newlines/whitespace. The logic is that the SQL should start with either a comment or the code. See gif below for demonstration.

### Are there any other side effects of this change that we should be aware of?
This adds new rule so doesn't impact existing rules.
### Pull Request checklist
- [X] Please confirm you have completed any of the necessary steps below.
- Included test cases to demonstrate any code changes, which may be one or more of the following:
- `.yml` rule test cases in `test/fixtures/rules/std_rule_cases`.
- `.sql`/`.yml` parser test cases in `test/fixtures/dialects` (note YML files can be auto generated with `python test/generate_parse_fixture_yml.py` or by running `tox` locally).
- Full autofix test cases in `test/fixtures/linter/autofix`.
- Other.
- Added appropriate documentation for the change.
- Created GitHub issues for any relevant followup/future enhancements if appropriate.
I've added the docstrings/unit tests. I believe the documentation is updated from the the docstrings but happy to add any additional required documentation if I've missed anything 😄
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in src/sqlfluff/rules/L050.py]
(definition of Rule_L050:)
class Rule_L050(BaseRule):
"""Files must not begin with newlines or whitespace.
| **Anti-pattern**
| The content in file begins with newlines or whitespace, the ^ represents the beginning of file.
.. code-block:: sql
:force:
^
SELECT
a
FROM foo
-- Beginning on an indented line is also forbidden,
-- (the • represents space).
••••SELECT
••••a
FROM
••••foo
| **Best practice**
| Start file on either code or comment, the ^ represents the beginning of file.
.. code-block:: sql
:force:
^SELECT
a
FROM foo
-- Including an initial block comment.
^/*
This is a description of my SQL code.
*/
SELECT
a
FROM
foo
-- Including an initial inline comment.
^--This is a description of my SQL code.
SELECT
a
FROM
foo"""
(definition of Rule_L050._potential_template_collision:)
def _potential_template_collision(context: RuleContext) -> bool:
"""Check for any templated raw slices that intersect with source slices in the raw_stack.
Returns:
:obj:`bool` indicating a preceding templated raw slice has been detected."""
(definition of Rule_L050._eval:)
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Files must not begin with newlines or whitespace."""
[end of new definitions in src/sqlfluff/rules/L050.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | d3c40ff6539bdb512211ebf137d7fe3ebc9b585c | ||
sphinx-doc__sphinx-9822 | 9,822 | sphinx-doc/sphinx | 4.4 | 26a4f5d2b83fa55d47d31744089ac14edc08397f | 2021-11-06T14:50:44Z | diff --git a/CHANGES b/CHANGES
index d2ddae71c86..54b09e55635 100644
--- a/CHANGES
+++ b/CHANGES
@@ -47,6 +47,8 @@ Features added
* #9391: texinfo: improve variable in ``samp`` role
* #9578: texinfo: Add :confval:`texinfo_cross_references` to disable cross
references for readability with standalone readers
+* #9822 (and #9062), add new Intersphinx role :rst:role:`external` for explict
+ lookup in the external projects, without resolving to the local project.
Bugs fixed
----------
diff --git a/doc/usage/extensions/intersphinx.rst b/doc/usage/extensions/intersphinx.rst
index a3e65bed642..2bcce68d0ee 100644
--- a/doc/usage/extensions/intersphinx.rst
+++ b/doc/usage/extensions/intersphinx.rst
@@ -8,20 +8,25 @@
.. versionadded:: 0.5
-This extension can generate automatic links to the documentation of objects in
-other projects.
-
-Usage is simple: whenever Sphinx encounters a cross-reference that has no
-matching target in the current documentation set, it looks for targets in the
-documentation sets configured in :confval:`intersphinx_mapping`. A reference
-like ``:py:class:`zipfile.ZipFile``` can then link to the Python documentation
+This extension can generate links to the documentation of objects in external
+projects, either explicitly through the :rst:role:`external` role, or as a
+fallback resolution for any other cross-reference.
+
+Usage for fallback resolution is simple: whenever Sphinx encounters a
+cross-reference that has no matching target in the current documentation set,
+it looks for targets in the external documentation sets configured in
+:confval:`intersphinx_mapping`. A reference like
+``:py:class:`zipfile.ZipFile``` can then link to the Python documentation
for the ZipFile class, without you having to specify where it is located
exactly.
-When using the "new" format (see below), you can even force lookup in a foreign
-set by prefixing the link target appropriately. A link like ``:ref:`comparison
-manual <python:comparisons>``` will then link to the label "comparisons" in the
-doc set "python", if it exists.
+When using the :rst:role:`external` role, you can force lookup to any external
+projects, and optionally to a specific external project.
+A link like ``:external:ref:`comparison manual <comparisons>``` will then link
+to the label "comparisons" in whichever configured external project, if it
+exists,
+and a link like ``:external+python:ref:`comparison manual <comparisons>``` will
+link to the label "comparisons" only in the doc set "python", if it exists.
Behind the scenes, this works as follows:
@@ -30,8 +35,8 @@ Behind the scenes, this works as follows:
* Projects using the Intersphinx extension can specify the location of such
mapping files in the :confval:`intersphinx_mapping` config value. The mapping
- will then be used to resolve otherwise missing references to objects into
- links to the other documentation.
+ will then be used to resolve both :rst:role:`external` references, and also
+ otherwise missing references to objects into links to the other documentation.
* By default, the mapping file is assumed to be at the same location as the rest
of the documentation; however, the location of the mapping file can also be
@@ -79,10 +84,10 @@ linking:
at the same location as the base URI) or another local file path or a full
HTTP URI to an inventory file.
- The unique identifier can be used to prefix cross-reference targets, so that
+ The unique identifier can be used in the :rst:role:`external` role, so that
it is clear which intersphinx set the target belongs to. A link like
- ``:ref:`comparison manual <python:comparisons>``` will link to the label
- "comparisons" in the doc set "python", if it exists.
+ ``external:python+ref:`comparison manual <comparisons>``` will link to the
+ label "comparisons" in the doc set "python", if it exists.
**Example**
@@ -162,21 +167,50 @@ linking:
The default value is an empty list.
- When a cross-reference without an explicit inventory specification is being
- resolved by intersphinx, skip resolution if it matches one of the
- specifications in this list.
+ When a non-:rst:role:`external` cross-reference is being resolved by
+ intersphinx, skip resolution if it matches one of the specifications in this
+ list.
For example, with ``intersphinx_disabled_reftypes = ['std:doc']``
a cross-reference ``:doc:`installation``` will not be attempted to be
- resolved by intersphinx, but ``:doc:`otherbook:installation``` will be
- attempted to be resolved in the inventory named ``otherbook`` in
+ resolved by intersphinx, but ``:external+otherbook:doc:`installation``` will
+ be attempted to be resolved in the inventory named ``otherbook`` in
:confval:`intersphinx_mapping`.
At the same time, all cross-references generated in, e.g., Python,
declarations will still be attempted to be resolved by intersphinx.
- If ``*`` is in the list of domains, then no references without an explicit
- inventory will be resolved by intersphinx.
+ If ``*`` is in the list of domains, then no non-:rst:role:`external`
+ references will be resolved by intersphinx.
+
+Explicitly Reference External Objects
+-------------------------------------
+
+The Intersphinx extension provides the following role.
+
+.. rst:role:: external
+
+ .. versionadded:: 4.4
+
+ Use Intersphinx to perform lookup only in external projects, and not the
+ current project. Intersphinx still needs to know the type of object you
+ would like to find, so the general form of this role is to write the
+ cross-refererence as if the object is in the current project, but then prefix
+ it with ``:external``.
+ The two forms are then
+
+ - ``:external:domain:reftype:`target```,
+ e.g., ``:external:py:class:`zipfile.ZipFile```, or
+ - ``:external:reftype:`target```,
+ e.g., ``:external:doc:`installation```.
+
+ If you would like to constrain the lookup to a specific external project,
+ then the key of the project, as specified in :confval:`intersphinx_mapping`,
+ is added as well to get the two forms
+ - ``:external+invname:domain:reftype:`target```,
+ e.g., ``:external+python:py:class:`zipfile.ZipFile```, or
+ - ``:external+invname:reftype:`target```,
+ e.g., ``:external+python:doc:`installation```.
Showing all links of an Intersphinx mapping file
------------------------------------------------
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 7f3588ade79..2f8ab2588e7 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -26,15 +26,17 @@
import concurrent.futures
import functools
import posixpath
+import re
import sys
import time
from os import path
-from typing import IO, Any, Dict, List, Optional, Tuple
+from types import ModuleType
+from typing import IO, Any, Dict, List, Optional, Tuple, cast
from urllib.parse import urlsplit, urlunsplit
from docutils import nodes
-from docutils.nodes import Element, TextElement
-from docutils.utils import relative_path
+from docutils.nodes import Element, Node, TextElement, system_message
+from docutils.utils import Reporter, relative_path
import sphinx
from sphinx.addnodes import pending_xref
@@ -43,10 +45,13 @@
from sphinx.config import Config
from sphinx.domains import Domain
from sphinx.environment import BuildEnvironment
+from sphinx.errors import ExtensionError
from sphinx.locale import _, __
+from sphinx.transforms.post_transforms import ReferencesResolver
from sphinx.util import logging, requests
+from sphinx.util.docutils import CustomReSTDispatcher, SphinxRole
from sphinx.util.inventory import InventoryFile
-from sphinx.util.typing import Inventory, InventoryItem
+from sphinx.util.typing import Inventory, InventoryItem, RoleFunction
logger = logging.getLogger(__name__)
@@ -466,6 +471,144 @@ def missing_reference(app: Sphinx, env: BuildEnvironment, node: pending_xref,
return resolve_reference_detect_inventory(env, node, contnode)
+class IntersphinxDispatcher(CustomReSTDispatcher):
+ """Custom dispatcher for external role.
+
+ This enables :external:***:/:external+***: roles on parsing reST document.
+ """
+
+ def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter
+ ) -> Tuple[RoleFunction, List[system_message]]:
+ if len(role_name) > 9 and role_name.startswith(('external:', 'external+')):
+ return IntersphinxRole(role_name), []
+ else:
+ return super().role(role_name, language_module, lineno, reporter)
+
+
+class IntersphinxRole(SphinxRole):
+ # group 1: just for the optionality of the inventory name
+ # group 2: the inventory name (optional)
+ # group 3: the domain:role or role part
+ _re_inv_ref = re.compile(r"(\+([^:]+))?:(.*)")
+
+ def __init__(self, orig_name: str) -> None:
+ self.orig_name = orig_name
+
+ def run(self) -> Tuple[List[Node], List[system_message]]:
+ assert self.name == self.orig_name.lower()
+ inventory, name_suffix = self.get_inventory_and_name_suffix(self.orig_name)
+ if inventory and not inventory_exists(self.env, inventory):
+ logger.warning(__('inventory for external cross-reference not found: %s'),
+ inventory, location=(self.env.docname, self.lineno))
+ return [], []
+
+ role_name = self.get_role_name(name_suffix)
+ if role_name is None:
+ logger.warning(__('role for external cross-reference not found: %s'), name_suffix,
+ location=(self.env.docname, self.lineno))
+ return [], []
+
+ result, messages = self.invoke_role(role_name)
+ for node in result:
+ if isinstance(node, pending_xref):
+ node['intersphinx'] = True
+ node['inventory'] = inventory
+
+ return result, messages
+
+ def get_inventory_and_name_suffix(self, name: str) -> Tuple[Optional[str], str]:
+ assert name.startswith('external'), name
+ assert name[8] in ':+', name
+ # either we have an explicit inventory name, i.e,
+ # :external+inv:role: or
+ # :external+inv:domain:role:
+ # or we look in all inventories, i.e.,
+ # :external:role: or
+ # :external:domain:role:
+ inv, suffix = IntersphinxRole._re_inv_ref.fullmatch(name, 8).group(2, 3)
+ return inv, suffix
+
+ def get_role_name(self, name: str) -> Optional[Tuple[str, str]]:
+ names = name.split(':')
+ if len(names) == 1:
+ # role
+ default_domain = self.env.temp_data.get('default_domain')
+ domain = default_domain.name if default_domain else None
+ role = names[0]
+ elif len(names) == 2:
+ # domain:role:
+ domain = names[0]
+ role = names[1]
+ else:
+ return None
+
+ if domain and self.is_existent_role(domain, role):
+ return (domain, role)
+ elif self.is_existent_role('std', role):
+ return ('std', role)
+ else:
+ return None
+
+ def is_existent_role(self, domain_name: str, role_name: str) -> bool:
+ try:
+ domain = self.env.get_domain(domain_name)
+ if role_name in domain.roles:
+ return True
+ else:
+ return False
+ except ExtensionError:
+ return False
+
+ def invoke_role(self, role: Tuple[str, str]) -> Tuple[List[Node], List[system_message]]:
+ domain = self.env.get_domain(role[0])
+ if domain:
+ role_func = domain.role(role[1])
+
+ return role_func(':'.join(role), self.rawtext, self.text, self.lineno,
+ self.inliner, self.options, self.content)
+ else:
+ return [], []
+
+
+class IntersphinxRoleResolver(ReferencesResolver):
+ """pending_xref node resolver for intersphinx role.
+
+ This resolves pending_xref nodes generated by :intersphinx:***: role.
+ """
+
+ default_priority = ReferencesResolver.default_priority - 1
+
+ def run(self, **kwargs: Any) -> None:
+ for node in self.document.traverse(pending_xref):
+ if 'intersphinx' not in node:
+ continue
+ contnode = cast(nodes.TextElement, node[0].deepcopy())
+ inv_name = node['inventory']
+ if inv_name is not None:
+ assert inventory_exists(self.env, inv_name)
+ newnode = resolve_reference_in_inventory(self.env, inv_name, node, contnode)
+ else:
+ newnode = resolve_reference_any_inventory(self.env, False, node, contnode)
+ if newnode is None:
+ typ = node['reftype']
+ msg = (__('external %s:%s reference target not found: %s') %
+ (node['refdomain'], typ, node['reftarget']))
+ logger.warning(msg, location=node, type='ref', subtype=typ)
+ node.replace_self(contnode)
+ else:
+ node.replace_self(newnode)
+
+
+def install_dispatcher(app: Sphinx, docname: str, source: List[str]) -> None:
+ """Enable IntersphinxDispatcher.
+
+ .. note:: The installed dispatcher will uninstalled on disabling sphinx_domain
+ automatically.
+ """
+ dispatcher = IntersphinxDispatcher()
+ dispatcher.enable()
+
+
def normalize_intersphinx_mapping(app: Sphinx, config: Config) -> None:
for key, value in config.intersphinx_mapping.copy().items():
try:
@@ -497,7 +640,9 @@ def setup(app: Sphinx) -> Dict[str, Any]:
app.add_config_value('intersphinx_disabled_reftypes', [], True)
app.connect('config-inited', normalize_intersphinx_mapping, priority=800)
app.connect('builder-inited', load_mappings)
+ app.connect('source-read', install_dispatcher)
app.connect('missing-reference', missing_reference)
+ app.add_post_transform(IntersphinxRoleResolver)
return {
'version': sphinx.__display_version__,
'env_version': 1,
diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py
index c3a6ff9e2e9..5ab7666496d 100644
--- a/sphinx/util/docutils.py
+++ b/sphinx/util/docutils.py
@@ -166,16 +166,14 @@ def patch_docutils(confdir: Optional[str] = None) -> Generator[None, None, None]
yield
-class ElementLookupError(Exception):
- pass
-
+class CustomReSTDispatcher:
+ """Custom reST's mark-up dispatcher.
-class sphinx_domains:
- """Monkey-patch directive and role dispatch, so that domain-specific
- markup takes precedence.
+ This replaces docutils's directives and roles dispatch mechanism for reST parser
+ by original one temporarily.
"""
- def __init__(self, env: "BuildEnvironment") -> None:
- self.env = env
+
+ def __init__(self) -> None:
self.directive_func: Callable = lambda *args: (None, [])
self.roles_func: Callable = lambda *args: (None, [])
@@ -189,13 +187,35 @@ def enable(self) -> None:
self.directive_func = directives.directive
self.role_func = roles.role
- directives.directive = self.lookup_directive
- roles.role = self.lookup_role
+ directives.directive = self.directive
+ roles.role = self.role
def disable(self) -> None:
directives.directive = self.directive_func
roles.role = self.role_func
+ def directive(self,
+ directive_name: str, language_module: ModuleType, document: nodes.document
+ ) -> Tuple[Optional[Type[Directive]], List[system_message]]:
+ return self.directive_func(directive_name, language_module, document)
+
+ def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter
+ ) -> Tuple[RoleFunction, List[system_message]]:
+ return self.role_func(role_name, language_module, lineno, reporter)
+
+
+class ElementLookupError(Exception):
+ pass
+
+
+class sphinx_domains(CustomReSTDispatcher):
+ """Monkey-patch directive and role dispatch, so that domain-specific
+ markup takes precedence.
+ """
+ def __init__(self, env: "BuildEnvironment") -> None:
+ self.env = env
+ super().__init__()
+
def lookup_domain_element(self, type: str, name: str) -> Any:
"""Lookup a markup element (directive or role), given its name which can
be a full name (with domain).
@@ -226,17 +246,20 @@ def lookup_domain_element(self, type: str, name: str) -> Any:
raise ElementLookupError
- def lookup_directive(self, directive_name: str, language_module: ModuleType, document: nodes.document) -> Tuple[Optional[Type[Directive]], List[system_message]]: # NOQA
+ def directive(self,
+ directive_name: str, language_module: ModuleType, document: nodes.document
+ ) -> Tuple[Optional[Type[Directive]], List[system_message]]:
try:
return self.lookup_domain_element('directive', directive_name)
except ElementLookupError:
- return self.directive_func(directive_name, language_module, document)
+ return super().directive(directive_name, language_module, document)
- def lookup_role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter) -> Tuple[RoleFunction, List[system_message]]: # NOQA
+ def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter
+ ) -> Tuple[RoleFunction, List[system_message]]:
try:
return self.lookup_domain_element('role', role_name)
except ElementLookupError:
- return self.role_func(role_name, language_module, lineno, reporter)
+ return super().role(role_name, language_module, lineno, reporter)
class WarningStream:
| diff --git a/tests/roots/test-ext-intersphinx-role/conf.py b/tests/roots/test-ext-intersphinx-role/conf.py
new file mode 100644
index 00000000000..a54f5c2ad6b
--- /dev/null
+++ b/tests/roots/test-ext-intersphinx-role/conf.py
@@ -0,0 +1,3 @@
+extensions = ['sphinx.ext.intersphinx']
+# the role should not honor this conf var
+intersphinx_disabled_reftypes = ['*']
diff --git a/tests/roots/test-ext-intersphinx-role/index.rst b/tests/roots/test-ext-intersphinx-role/index.rst
new file mode 100644
index 00000000000..58edb7a1a01
--- /dev/null
+++ b/tests/roots/test-ext-intersphinx-role/index.rst
@@ -0,0 +1,44 @@
+- ``module1`` is only defined in ``inv``:
+ :external:py:mod:`module1`
+
+.. py:module:: module2
+
+- ``module2`` is defined here and also in ``inv``, but should resolve to inv:
+ :external:py:mod:`module2`
+
+- ``module3`` is not defined anywhere, so should warn:
+ :external:py:mod:`module3`
+
+.. py:module:: module10
+
+- ``module10`` is only defined here, but should still not be resolved to:
+ :external:py:mod:`module10`
+
+- a function in inv:
+ :external:py:func:`module1.func`
+- a method, but with old style inventory prefix, which shouldn't work:
+ :external:py:meth:`inv:Foo.bar`
+- a non-existing role:
+ :external:py:nope:`something`
+
+.. default-domain:: cpp
+
+- a type where the default domain is used to find the role:
+ :external:type:`std::uint8_t`
+- a non-existing role in default domain:
+ :external:nope:`somethingElse`
+
+- two roles in ``std`` which can be found without a default domain:
+
+ - :external:doc:`docname`
+ - :external:option:`ls -l`
+
+
+- a function with explicit inventory:
+ :external+inv:c:func:`CFunc`
+- a class with explicit non-existing inventory, which also has upper-case in name:
+ :external+invNope:cpp:class:`foo::Bar`
+
+
+- explicit title:
+ :external:cpp:type:`FoonsTitle <foons>`
diff --git a/tests/test_ext_intersphinx.py b/tests/test_ext_intersphinx.py
index 7f369e9a3cd..b2ad8afe52f 100644
--- a/tests/test_ext_intersphinx.py
+++ b/tests/test_ext_intersphinx.py
@@ -524,3 +524,48 @@ def log_message(*args, **kwargs):
stdout, stderr = capsys.readouterr()
assert stdout.startswith("c:function\n")
assert stderr == ""
+
+
+@pytest.mark.sphinx('html', testroot='ext-intersphinx-role')
+def test_intersphinx_role(app, warning):
+ inv_file = app.srcdir / 'inventory'
+ inv_file.write_bytes(inventory_v2)
+ app.config.intersphinx_mapping = {
+ 'inv': ('http://example.org/', inv_file),
+ }
+ app.config.intersphinx_cache_limit = 0
+ app.config.nitpicky = True
+
+ # load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
+ load_mappings(app)
+
+ app.build()
+ content = (app.outdir / 'index.html').read_text()
+ wStr = warning.getvalue()
+
+ html = '<a class="reference external" href="http://example.org/{}" title="(in foo v2.0)">'
+ assert html.format('foo.html#module-module1') in content
+ assert html.format('foo.html#module-module2') in content
+ assert "WARNING: external py:mod reference target not found: module3" in wStr
+ assert "WARNING: external py:mod reference target not found: module10" in wStr
+
+ assert html.format('sub/foo.html#module1.func') in content
+ assert "WARNING: external py:meth reference target not found: inv:Foo.bar" in wStr
+
+ assert "WARNING: role for external cross-reference not found: py:nope" in wStr
+
+ # default domain
+ assert html.format('index.html#std_uint8_t') in content
+ assert "WARNING: role for external cross-reference not found: nope" in wStr
+
+ # std roles without domain prefix
+ assert html.format('docname.html') in content
+ assert html.format('index.html#cmdoption-ls-l') in content
+
+ # explicit inventory
+ assert html.format('cfunc.html#CFunc') in content
+ assert "WARNING: inventory for external cross-reference not found: invNope" in wStr
+
+ # explicit title
+ assert html.format('index.html#foons') in content
| diff --git a/CHANGES b/CHANGES
index d2ddae71c86..54b09e55635 100644
--- a/CHANGES
+++ b/CHANGES
@@ -47,6 +47,8 @@ Features added
* #9391: texinfo: improve variable in ``samp`` role
* #9578: texinfo: Add :confval:`texinfo_cross_references` to disable cross
references for readability with standalone readers
+* #9822 (and #9062), add new Intersphinx role :rst:role:`external` for explict
+ lookup in the external projects, without resolving to the local project.
Bugs fixed
----------
diff --git a/doc/usage/extensions/intersphinx.rst b/doc/usage/extensions/intersphinx.rst
index a3e65bed642..2bcce68d0ee 100644
--- a/doc/usage/extensions/intersphinx.rst
+++ b/doc/usage/extensions/intersphinx.rst
@@ -8,20 +8,25 @@
.. versionadded:: 0.5
-This extension can generate automatic links to the documentation of objects in
-other projects.
-
-Usage is simple: whenever Sphinx encounters a cross-reference that has no
-matching target in the current documentation set, it looks for targets in the
-documentation sets configured in :confval:`intersphinx_mapping`. A reference
-like ``:py:class:`zipfile.ZipFile``` can then link to the Python documentation
+This extension can generate links to the documentation of objects in external
+projects, either explicitly through the :rst:role:`external` role, or as a
+fallback resolution for any other cross-reference.
+
+Usage for fallback resolution is simple: whenever Sphinx encounters a
+cross-reference that has no matching target in the current documentation set,
+it looks for targets in the external documentation sets configured in
+:confval:`intersphinx_mapping`. A reference like
+``:py:class:`zipfile.ZipFile``` can then link to the Python documentation
for the ZipFile class, without you having to specify where it is located
exactly.
-When using the "new" format (see below), you can even force lookup in a foreign
-set by prefixing the link target appropriately. A link like ``:ref:`comparison
-manual <python:comparisons>``` will then link to the label "comparisons" in the
-doc set "python", if it exists.
+When using the :rst:role:`external` role, you can force lookup to any external
+projects, and optionally to a specific external project.
+A link like ``:external:ref:`comparison manual <comparisons>``` will then link
+to the label "comparisons" in whichever configured external project, if it
+exists,
+and a link like ``:external+python:ref:`comparison manual <comparisons>``` will
+link to the label "comparisons" only in the doc set "python", if it exists.
Behind the scenes, this works as follows:
@@ -30,8 +35,8 @@ Behind the scenes, this works as follows:
* Projects using the Intersphinx extension can specify the location of such
mapping files in the :confval:`intersphinx_mapping` config value. The mapping
- will then be used to resolve otherwise missing references to objects into
- links to the other documentation.
+ will then be used to resolve both :rst:role:`external` references, and also
+ otherwise missing references to objects into links to the other documentation.
* By default, the mapping file is assumed to be at the same location as the rest
of the documentation; however, the location of the mapping file can also be
@@ -79,10 +84,10 @@ linking:
at the same location as the base URI) or another local file path or a full
HTTP URI to an inventory file.
- The unique identifier can be used to prefix cross-reference targets, so that
+ The unique identifier can be used in the :rst:role:`external` role, so that
it is clear which intersphinx set the target belongs to. A link like
- ``:ref:`comparison manual <python:comparisons>``` will link to the label
- "comparisons" in the doc set "python", if it exists.
+ ``external:python+ref:`comparison manual <comparisons>``` will link to the
+ label "comparisons" in the doc set "python", if it exists.
**Example**
@@ -162,21 +167,50 @@ linking:
The default value is an empty list.
- When a cross-reference without an explicit inventory specification is being
- resolved by intersphinx, skip resolution if it matches one of the
- specifications in this list.
+ When a non-:rst:role:`external` cross-reference is being resolved by
+ intersphinx, skip resolution if it matches one of the specifications in this
+ list.
For example, with ``intersphinx_disabled_reftypes = ['std:doc']``
a cross-reference ``:doc:`installation``` will not be attempted to be
- resolved by intersphinx, but ``:doc:`otherbook:installation``` will be
- attempted to be resolved in the inventory named ``otherbook`` in
+ resolved by intersphinx, but ``:external+otherbook:doc:`installation``` will
+ be attempted to be resolved in the inventory named ``otherbook`` in
:confval:`intersphinx_mapping`.
At the same time, all cross-references generated in, e.g., Python,
declarations will still be attempted to be resolved by intersphinx.
- If ``*`` is in the list of domains, then no references without an explicit
- inventory will be resolved by intersphinx.
+ If ``*`` is in the list of domains, then no non-:rst:role:`external`
+ references will be resolved by intersphinx.
+
+Explicitly Reference External Objects
+-------------------------------------
+
+The Intersphinx extension provides the following role.
+
+.. rst:role:: external
+
+ .. versionadded:: 4.4
+
+ Use Intersphinx to perform lookup only in external projects, and not the
+ current project. Intersphinx still needs to know the type of object you
+ would like to find, so the general form of this role is to write the
+ cross-refererence as if the object is in the current project, but then prefix
+ it with ``:external``.
+ The two forms are then
+
+ - ``:external:domain:reftype:`target```,
+ e.g., ``:external:py:class:`zipfile.ZipFile```, or
+ - ``:external:reftype:`target```,
+ e.g., ``:external:doc:`installation```.
+
+ If you would like to constrain the lookup to a specific external project,
+ then the key of the project, as specified in :confval:`intersphinx_mapping`,
+ is added as well to get the two forms
+ - ``:external+invname:domain:reftype:`target```,
+ e.g., ``:external+python:py:class:`zipfile.ZipFile```, or
+ - ``:external+invname:reftype:`target```,
+ e.g., ``:external+python:doc:`installation```.
Showing all links of an Intersphinx mapping file
------------------------------------------------
| [
{
"components": [
{
"doc": "Custom dispatcher for external role.\n\nThis enables :external:***:/:external+***: roles on parsing reST document.",
"lines": [
474,
485
],
"name": "IntersphinxDispatcher",
"signature": "class IntersphinxDispatcher(Cus... | [
"tests/test_ext_intersphinx.py::test_intersphinx_role"
] | [
"tests/test_ext_intersphinx.py::test_fetch_inventory_redirection",
"tests/test_ext_intersphinx.py::test_missing_reference",
"tests/test_ext_intersphinx.py::test_missing_reference_pydomain",
"tests/test_ext_intersphinx.py::test_missing_reference_stddomain",
"tests/test_ext_intersphinx.py::test_missing_refere... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Intersphinx role (2)
Extension of #9062 (not sure if there is a good way simply to reuse that PR somehow)
### Feature or Bugfix
- Feature
### Purpose
Continuation of the work by @tk0miya in #9062 to add a role for performing explicit intersphinx lookups.
### Detail
- the role(-prefix) is called ``external``. I think it is ok to have a bit more verbose than just ``ext`` to make it stand out that this is not an ordinary cross-reference.
- https://github.com/sphinx-doc/sphinx/pull/9062#issuecomment-879861369, when a specific inventory lookup is requested I opted for ``+`` as the delimiter. When I tried the other options the inventory name sort of looked like part of the domain/reftype name instead of a separate item. Semantically a ``+`` also reads nicely as ``:external:python+py:class:`` indeed is requesting a lookup constrained to the ``python`` project and constrained to ``py:class`` objects.
- the role will not fall back to the local project. It will warn if the lookup does not go well:
- if the inventory name doesn't exist,
- if the reftype/domain:reftype doesn't exist as a role (taking default-domain and the special ``std`` domain shorthands into acocunt), and
- if a/the inventory doesn't contain the target.
- it looks like Docutils mandates that role names are made lower-case before the role function is called, which is not good when an inventory name is present. So I haxed the original role name into the role processing. Not sure if there is a better way. Having the inventory name as part of the target text is problematic as intersphinx has no knowledge of the target format, so extracting information from it seems brittle.
- Deprecating the old style prefixing will be done in a subsequent PR, but I think I got the intersphinx docs fully updated to mention this role instead.
### Relates
- #9062
- https://github.com/sphinx-doc/sphinx/pull/8418#issuecomment-758838589
- #9682
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sphinx/ext/intersphinx.py]
(definition of IntersphinxDispatcher:)
class IntersphinxDispatcher(CustomReSTDispatcher):
"""Custom dispatcher for external role.
This enables :external:***:/:external+***: roles on parsing reST document."""
(definition of IntersphinxDispatcher.role:)
def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter ) -> Tuple[RoleFunction, List[system_message]]:
(definition of IntersphinxRole:)
class IntersphinxRole(SphinxRole):
(definition of IntersphinxRole.__init__:)
def __init__(self, orig_name: str) -> None:
(definition of IntersphinxRole.run:)
def run(self) -> Tuple[List[Node], List[system_message]]:
(definition of IntersphinxRole.get_inventory_and_name_suffix:)
def get_inventory_and_name_suffix(self, name: str) -> Tuple[Optional[str], str]:
(definition of IntersphinxRole.get_role_name:)
def get_role_name(self, name: str) -> Optional[Tuple[str, str]]:
(definition of IntersphinxRole.is_existent_role:)
def is_existent_role(self, domain_name: str, role_name: str) -> bool:
(definition of IntersphinxRole.invoke_role:)
def invoke_role(self, role: Tuple[str, str]) -> Tuple[List[Node], List[system_message]]:
(definition of IntersphinxRoleResolver:)
class IntersphinxRoleResolver(ReferencesResolver):
"""pending_xref node resolver for intersphinx role.
This resolves pending_xref nodes generated by :intersphinx:***: role."""
(definition of IntersphinxRoleResolver.run:)
def run(self, **kwargs: Any) -> None:
(definition of install_dispatcher:)
def install_dispatcher(app: Sphinx, docname: str, source: List[str]) -> None:
"""Enable IntersphinxDispatcher.
.. note:: The installed dispatcher will uninstalled on disabling sphinx_domain
automatically."""
[end of new definitions in sphinx/ext/intersphinx.py]
[start of new definitions in sphinx/util/docutils.py]
(definition of CustomReSTDispatcher:)
class CustomReSTDispatcher:
"""Custom reST's mark-up dispatcher.
This replaces docutils's directives and roles dispatch mechanism for reST parser
by original one temporarily."""
(definition of CustomReSTDispatcher.__init__:)
def __init__(self) -> None:
(definition of CustomReSTDispatcher.__enter__:)
def __enter__(self) -> None:
(definition of CustomReSTDispatcher.__exit__:)
def __exit__(self, exc_type: Type[Exception], exc_value: Exception, traceback: Any) -> None:
(definition of CustomReSTDispatcher.enable:)
def enable(self) -> None:
(definition of CustomReSTDispatcher.disable:)
def disable(self) -> None:
(definition of CustomReSTDispatcher.directive:)
def directive(self, directive_name: str, language_module: ModuleType, document: nodes.document ) -> Tuple[Optional[Type[Directive]], List[system_message]]:
(definition of CustomReSTDispatcher.role:)
def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter ) -> Tuple[RoleFunction, List[system_message]]:
(definition of sphinx_domains.directive:)
def directive(self, directive_name: str, language_module: ModuleType, document: nodes.document ) -> Tuple[Optional[Type[Directive]], List[system_message]]:
(definition of sphinx_domains.role:)
def role(self, role_name: str, language_module: ModuleType, lineno: int, reporter: Reporter ) -> Tuple[RoleFunction, List[system_message]]:
[end of new definitions in sphinx/util/docutils.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | cee86909b9f4ca338bc41168e91226de520369c6 | |
scikit-learn__scikit-learn-21569 | 21,569 | scikit-learn/scikit-learn | 1.1 | bacc91cf1d4531bcc91aa60893fdf7df319485ec | 2021-11-06T08:15:04Z | diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index 6a5b2d226cabe..5e67916888511 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -237,6 +237,15 @@ Changelog
instead of `__init__`.
:pr:`21434` by :user:`Krum Arnaudov <krumeto>`.
+- |Enhancement| Added the `get_feature_names_out` method and a new parameter
+ `feature_names_out` to :class:`preprocessing.FunctionTransformer`. You can set
+ `feature_names_out` to 'one-to-one' to use the input features names as the
+ output feature names, or you can set it to a callable that returns the output
+ feature names. This is especially useful when the transformer changes the
+ number of features. If `feature_names_out` is None (which is the default),
+ then `get_output_feature_names` is not defined.
+ :pr:`21569` by :user:`Aurélien Geron <ageron>`.
+
:mod:`sklearn.svm`
..................
diff --git a/sklearn/preprocessing/_function_transformer.py b/sklearn/preprocessing/_function_transformer.py
index 595ca0e0bbc1b..cea720aeb6a67 100644
--- a/sklearn/preprocessing/_function_transformer.py
+++ b/sklearn/preprocessing/_function_transformer.py
@@ -1,7 +1,14 @@
import warnings
+import numpy as np
+
from ..base import BaseEstimator, TransformerMixin
-from ..utils.validation import _allclose_dense_sparse, check_array
+from ..utils.metaestimators import available_if
+from ..utils.validation import (
+ _allclose_dense_sparse,
+ _check_feature_names_in,
+ check_array,
+)
def _identity(X):
@@ -61,6 +68,20 @@ class FunctionTransformer(TransformerMixin, BaseEstimator):
.. versionadded:: 0.20
+ feature_names_out : callable, 'one-to-one' or None, default=None
+ Determines the list of feature names that will be returned by the
+ `get_feature_names_out` method. If it is 'one-to-one', then the output
+ feature names will be equal to the input feature names. If it is a
+ callable, then it must take two positional arguments: this
+ `FunctionTransformer` (`self`) and an array-like of input feature names
+ (`input_features`). It must return an array-like of output feature
+ names. The `get_feature_names_out` method is only defined if
+ `feature_names_out` is not None.
+
+ See ``get_feature_names_out`` for more details.
+
+ .. versionadded:: 1.1
+
kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to func.
@@ -113,6 +134,7 @@ def __init__(
validate=False,
accept_sparse=False,
check_inverse=True,
+ feature_names_out=None,
kw_args=None,
inv_kw_args=None,
):
@@ -121,6 +143,7 @@ def __init__(
self.validate = validate
self.accept_sparse = accept_sparse
self.check_inverse = check_inverse
+ self.feature_names_out = feature_names_out
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
@@ -198,6 +221,63 @@ def inverse_transform(self, X):
X = check_array(X, accept_sparse=self.accept_sparse)
return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
+ @available_if(lambda self: self.feature_names_out is not None)
+ def get_feature_names_out(self, input_features=None):
+ """Get output feature names for transformation.
+
+ This method is only defined if `feature_names_out` is not None.
+
+ Parameters
+ ----------
+ input_features : array-like of str or None, default=None
+ Input feature names.
+
+ - If `input_features` is None, then `feature_names_in_` is
+ used as the input feature names. If `feature_names_in_` is not
+ defined, then names are generated:
+ `[x0, x1, ..., x(n_features_in_)]`.
+ - If `input_features` is array-like, then `input_features` must
+ match `feature_names_in_` if `feature_names_in_` is defined.
+
+ Returns
+ -------
+ feature_names_out : ndarray of str objects
+ Transformed feature names.
+
+ - If `feature_names_out` is 'one-to-one', the input feature names
+ are returned (see `input_features` above). This requires
+ `feature_names_in_` and/or `n_features_in_` to be defined, which
+ is done automatically if `validate=True`. Alternatively, you can
+ set them in `func`.
+ - If `feature_names_out` is a callable, then it is called with two
+ arguments, `self` and `input_features`, and its return value is
+ returned by this method.
+ """
+ if hasattr(self, "n_features_in_") or input_features is not None:
+ input_features = _check_feature_names_in(self, input_features)
+ if self.feature_names_out == "one-to-one":
+ if input_features is None:
+ raise ValueError(
+ "When 'feature_names_out' is 'one-to-one', either "
+ "'input_features' must be passed, or 'feature_names_in_' "
+ "and/or 'n_features_in_' must be defined. If you set "
+ "'validate' to 'True', then they will be defined "
+ "automatically when 'fit' is called. Alternatively, you "
+ "can set them in 'func'."
+ )
+ names_out = input_features
+ elif callable(self.feature_names_out):
+ names_out = self.feature_names_out(self, input_features)
+ else:
+ raise ValueError(
+ f"feature_names_out={self.feature_names_out!r} is invalid. "
+ 'It must either be "one-to-one" or a callable with two '
+ "arguments: the function transformer and an array-like of "
+ "input feature names. The callable must return an array-like "
+ "of output feature names."
+ )
+ return np.asarray(names_out, dtype=object)
+
def _transform(self, X, func=None, kw_args=None):
if func is None:
func = _identity
| diff --git a/sklearn/preprocessing/tests/test_function_transformer.py b/sklearn/preprocessing/tests/test_function_transformer.py
index b1ba9ebe6b762..525accf4568de 100644
--- a/sklearn/preprocessing/tests/test_function_transformer.py
+++ b/sklearn/preprocessing/tests/test_function_transformer.py
@@ -176,6 +176,158 @@ def test_function_transformer_frame():
assert hasattr(X_df_trans, "loc")
+@pytest.mark.parametrize(
+ "X, feature_names_out, input_features, expected",
+ [
+ (
+ # NumPy inputs, default behavior: generate names
+ np.random.rand(100, 3),
+ "one-to-one",
+ None,
+ ("x0", "x1", "x2"),
+ ),
+ (
+ # Pandas input, default behavior: use input feature names
+ {"a": np.random.rand(100), "b": np.random.rand(100)},
+ "one-to-one",
+ None,
+ ("a", "b"),
+ ),
+ (
+ # NumPy input, feature_names_out=callable
+ np.random.rand(100, 3),
+ lambda transformer, input_features: ("a", "b"),
+ None,
+ ("a", "b"),
+ ),
+ (
+ # Pandas input, feature_names_out=callable
+ {"a": np.random.rand(100), "b": np.random.rand(100)},
+ lambda transformer, input_features: ("c", "d", "e"),
+ None,
+ ("c", "d", "e"),
+ ),
+ (
+ # NumPy input, feature_names_out=callable – default input_features
+ np.random.rand(100, 3),
+ lambda transformer, input_features: tuple(input_features) + ("a",),
+ None,
+ ("x0", "x1", "x2", "a"),
+ ),
+ (
+ # Pandas input, feature_names_out=callable – default input_features
+ {"a": np.random.rand(100), "b": np.random.rand(100)},
+ lambda transformer, input_features: tuple(input_features) + ("c",),
+ None,
+ ("a", "b", "c"),
+ ),
+ (
+ # NumPy input, input_features=list of names
+ np.random.rand(100, 3),
+ "one-to-one",
+ ("a", "b", "c"),
+ ("a", "b", "c"),
+ ),
+ (
+ # Pandas input, input_features=list of names
+ {"a": np.random.rand(100), "b": np.random.rand(100)},
+ "one-to-one",
+ ("a", "b"), # must match feature_names_in_
+ ("a", "b"),
+ ),
+ (
+ # NumPy input, feature_names_out=callable, input_features=list
+ np.random.rand(100, 3),
+ lambda transformer, input_features: tuple(input_features) + ("d",),
+ ("a", "b", "c"),
+ ("a", "b", "c", "d"),
+ ),
+ (
+ # Pandas input, feature_names_out=callable, input_features=list
+ {"a": np.random.rand(100), "b": np.random.rand(100)},
+ lambda transformer, input_features: tuple(input_features) + ("c",),
+ ("a", "b"), # must match feature_names_in_
+ ("a", "b", "c"),
+ ),
+ ],
+)
+def test_function_transformer_get_feature_names_out(
+ X, feature_names_out, input_features, expected
+):
+ if isinstance(X, dict):
+ pd = pytest.importorskip("pandas")
+ X = pd.DataFrame(X)
+
+ transformer = FunctionTransformer(
+ feature_names_out=feature_names_out, validate=True
+ )
+ transformer.fit_transform(X)
+ names = transformer.get_feature_names_out(input_features)
+ assert isinstance(names, np.ndarray)
+ assert names.dtype == object
+ assert_array_equal(names, expected)
+
+
+def test_function_transformer_get_feature_names_out_without_validation():
+ transformer = FunctionTransformer(feature_names_out="one-to-one", validate=False)
+ X = np.random.rand(100, 2)
+ transformer.fit_transform(X)
+
+ msg = "When 'feature_names_out' is 'one-to-one', either"
+ with pytest.raises(ValueError, match=msg):
+ transformer.get_feature_names_out()
+
+ names = transformer.get_feature_names_out(("a", "b"))
+ assert isinstance(names, np.ndarray)
+ assert names.dtype == object
+ assert_array_equal(names, ("a", "b"))
+
+
+@pytest.mark.parametrize("feature_names_out", ["x0", ["x0"], ("x0",)])
+def test_function_transformer_feature_names_out_string(feature_names_out):
+ transformer = FunctionTransformer(feature_names_out=feature_names_out)
+ X = np.random.rand(100, 2)
+ transformer.fit_transform(X)
+
+ msg = """must either be "one-to-one" or a callable"""
+ with pytest.raises(ValueError, match=msg):
+ transformer.get_feature_names_out()
+
+
+def test_function_transformer_feature_names_out_is_None():
+ transformer = FunctionTransformer()
+ X = np.random.rand(100, 2)
+ transformer.fit_transform(X)
+
+ msg = "This 'FunctionTransformer' has no attribute 'get_feature_names_out'"
+ with pytest.raises(AttributeError, match=msg):
+ transformer.get_feature_names_out()
+
+
+def test_function_transformer_feature_names_out_uses_estimator():
+ def add_n_random_features(X, n):
+ return np.concatenate([X, np.random.rand(len(X), n)], axis=1)
+
+ def feature_names_out(transformer, input_features):
+ n = transformer.kw_args["n"]
+ return list(input_features) + [f"rnd{i}" for i in range(n)]
+
+ transformer = FunctionTransformer(
+ func=add_n_random_features,
+ feature_names_out=feature_names_out,
+ kw_args=dict(n=3),
+ validate=True,
+ )
+ pd = pytest.importorskip("pandas")
+ df = pd.DataFrame({"a": np.random.rand(100), "b": np.random.rand(100)})
+ transformer.fit_transform(df)
+ names = transformer.get_feature_names_out()
+
+ assert isinstance(names, np.ndarray)
+ assert names.dtype == object
+ assert_array_equal(names, ("a", "b", "rnd0", "rnd1", "rnd2"))
+
+
def test_function_transformer_validate_inverse():
"""Test that function transformer does not reset estimator in
`inverse_transform`."""
| diff --git a/doc/whats_new/v1.1.rst b/doc/whats_new/v1.1.rst
index 6a5b2d226cabe..5e67916888511 100644
--- a/doc/whats_new/v1.1.rst
+++ b/doc/whats_new/v1.1.rst
@@ -237,6 +237,15 @@ Changelog
instead of `__init__`.
:pr:`21434` by :user:`Krum Arnaudov <krumeto>`.
+- |Enhancement| Added the `get_feature_names_out` method and a new parameter
+ `feature_names_out` to :class:`preprocessing.FunctionTransformer`. You can set
+ `feature_names_out` to 'one-to-one' to use the input features names as the
+ output feature names, or you can set it to a callable that returns the output
+ feature names. This is especially useful when the transformer changes the
+ number of features. If `feature_names_out` is None (which is the default),
+ then `get_output_feature_names` is not defined.
+ :pr:`21569` by :user:`Aurélien Geron <ageron>`.
+
:mod:`sklearn.svm`
..................
| [
{
"components": [
{
"doc": "Get output feature names for transformation.\n\nThis method is only defined if `feature_names_out` is not None.\n\nParameters\n----------\ninput_features : array-like of str or None, default=None\n Input feature names.\n\n - If `input_features` is None, then `feat... | [
"sklearn/preprocessing/tests/test_function_transformer.py::test_function_transformer_get_feature_names_out[X0-one-to-one-None-expected0]",
"sklearn/preprocessing/tests/test_function_transformer.py::test_function_transformer_get_feature_names_out[X1-one-to-one-None-expected1]",
"sklearn/preprocessing/tests/test_... | [
"sklearn/preprocessing/tests/test_function_transformer.py::test_delegate_to_func",
"sklearn/preprocessing/tests/test_function_transformer.py::test_np_log",
"sklearn/preprocessing/tests/test_function_transformer.py::test_kw_arg",
"sklearn/preprocessing/tests/test_function_transformer.py::test_kw_arg_update",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
ENH Add get_feature_names_out to FunctionTransformer
#### Reference Issues/PRs
Follow-up on #18444.
Part of #21308.
This new feature was discussed in #21079.
#### What does this implement/fix? Explain your changes.
Adds the `get_feature_names_out` method and a new parameter `feature_names_out` to `preprocessing.FunctionTransformer`. By default, `get_feature_names_out` returns the input feature names, but you can set `feature_names_out` to return a different list, which is especially useful when the number of output features differs from the number of input features.
For example, here's a `FunctionTransformer` that outputs a single feature, equal to the input's mean along axis=1:
```python
import numpy as np
from sklearn.preprocessing import FunctionTransformer
mean_transformer = FunctionTransformer(
func=lambda X: X.mean(axis=1, keepdims=True),
feature_names_out=["mean"]
)
X_trans = mean_transformer.fit_transform(np.random.rand(10,2))
print(mean_transformer.get_feature_names_out()) # prints ['mean']
```
The `feature_names_out` parameter may also be a callable. This is useful if the output feature names depend on the input feature names, and/or if they depend on parameters like `kw_args`. Here's an example that uses both. It's a transformer that appends `n` random features to existing features:
```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import FunctionTransformer
def add_n_random_features(X, n):
return np.concatenate([X, np.random.rand(len(X), n)], axis=1)
def feature_names_out(transformer, input_features):
n = transformer.kw_args["n"]
return list(input_features) + [f"rnd{i}" for i in range(n)]
transformer = FunctionTransformer(
func=add_n_random_features,
feature_names_out=feature_names_out,
kw_args=dict(n=3),
validate=True, # IMPORTANT (see discussion below)
)
df = pd.DataFrame({"a": np.random.rand(100), "b": np.random.rand(100)})
X_trans = transformer.fit_transform(df)
print(transformer.get_feature_names_out()) # prints ['a' 'b' 'rnd0' 'rnd1' 'rnd2']
```
#### Any other comments?
I have some concerns regarding the fact that `validate` is `False` by default, which means that `n_features_in_` and `feature_names_in_` are not set automatically. So if you create a `FunctionTransformer` with the default `validate=False` and `feature_names_out=None`, then when you call `get_feature_names_out` without any argument, it will raise an exception (unless `transform` was called before and `func` set `n_feature_in_` or `feature_names_in_`). I tried to make this clear in the error message, but I'm worried that this will confuse users. Wdyt?
And if `validate=False` and you set `feature_names_out` to a callable, and call `get_feature_names_out` with no arguments, then the callable will get `input_features=None` as input (unless `transform` was called before and `func` set `n_features_in_` or `feature_names_in_`). Users may be surprised by this. Should we output a warning in this case? Wdyt?
Moreover, as shown in the second code example above, the output feature names may depend on `kw_args`, so if `feature_names_out` is a callable, `get_feature_names_out` passes `self` to it, plus the `input_features`. I considered checking `feature_names_out.__code__.co_varnames` to decide whether to pass no arguments, or just the `input_features`, or the `input_features` and `self`. But `__code__` is not used anywhere in the code base, and `inspect` is not used much, so I'm not sure whether such introspection would be frowned upon? I decided that it was simple enough to require users to always have two arguments: the transformer itself, and the `input_features`. Wdyt?
Lastly, when users want to create a `FunctionTransformer` that outputs a single feature, I expect that many will be tempted to set `feature_names_out` to a string instead of a list. To keep things consistent, I decided to raise an exception in this case, and have a clear error message to tell them to use `["foo"]` instead. Wdyt?
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sklearn/preprocessing/_function_transformer.py]
(definition of FunctionTransformer.get_feature_names_out:)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
This method is only defined if `feature_names_out` is not None.
Parameters
----------
input_features : array-like of str or None, default=None
Input feature names.
- If `input_features` is None, then `feature_names_in_` is
used as the input feature names. If `feature_names_in_` is not
defined, then names are generated:
`[x0, x1, ..., x(n_features_in_)]`.
- If `input_features` is array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
- If `feature_names_out` is 'one-to-one', the input feature names
are returned (see `input_features` above). This requires
`feature_names_in_` and/or `n_features_in_` to be defined, which
is done automatically if `validate=True`. Alternatively, you can
set them in `func`.
- If `feature_names_out` is a callable, then it is called with two
arguments, `self` and `input_features`, and its return value is
returned by this method."""
[end of new definitions in sklearn/preprocessing/_function_transformer.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 38ff5be25d0164bf9598bcfdde3b791ad6e261b0 | |
boto__boto3-3060 | 3,060 | boto/boto3 | null | ce777751a762bdf6fc299f53f3f16241f5f63ee0 | 2021-10-27T01:27:59Z | diff --git a/.changes/next-release/enhancement-Session-24336.json b/.changes/next-release/enhancement-Session-24336.json
new file mode 100644
index 0000000000..9e2ef41aa7
--- /dev/null
+++ b/.changes/next-release/enhancement-Session-24336.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "Session",
+ "description": "Added `get_partition_for_region` to lookup partition for a given region_name"
+}
diff --git a/boto3/session.py b/boto3/session.py
index 49c17039db..3a3a654ebc 100644
--- a/boto3/session.py
+++ b/boto3/session.py
@@ -182,6 +182,18 @@ def get_credentials(self):
"""
return self._session.get_credentials()
+ def get_partition_for_region(self, region_name):
+ """Lists the partition name of a particular region.
+
+ :type region_name: string
+ :param region_name: Name of the region to list partition for (e.g.,
+ us-east-1).
+
+ :rtype: string
+ :return: Returns the respective partition name (e.g., aws).
+ """
+ return self._session.get_partition_for_region(region_name)
+
def client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
| diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py
index 1fd7877f40..94301152d5 100644
--- a/tests/unit/test_session.py
+++ b/tests/unit/test_session.py
@@ -207,6 +207,17 @@ def test_get_available_regions(self):
)
assert partitions == ['foo']
+ def test_get_partition_for_region(self):
+ bc_session = mock.Mock()
+ bc_session.get_partition_for_region.return_value = 'baz'
+ session = Session(botocore_session=bc_session)
+
+ partition = session.get_partition_for_region('foo-bar-1')
+ bc_session.get_partition_for_region.assert_called_with(
+ 'foo-bar-1'
+ )
+ assert partition == 'baz'
+
def test_create_client(self):
session = Session(region_name='us-east-1')
client = session.client('sqs', region_name='us-west-2')
| diff --git a/.changes/next-release/enhancement-Session-24336.json b/.changes/next-release/enhancement-Session-24336.json
new file mode 100644
index 0000000000..9e2ef41aa7
--- /dev/null
+++ b/.changes/next-release/enhancement-Session-24336.json
@@ -0,0 +1,5 @@
+{
+ "type": "enhancement",
+ "category": "Session",
+ "description": "Added `get_partition_for_region` to lookup partition for a given region_name"
+}
| [
{
"components": [
{
"doc": "Lists the partition name of a particular region.\n\n:type region_name: string\n:param region_name: Name of the region to list partition for (e.g.,\n us-east-1).\n\n:rtype: string\n:return: Returns the respective partition name (e.g., aws).",
"lines": [
... | [
"tests/unit/test_session.py::TestSession::test_get_partition_for_region"
] | [
"tests/unit/test_session.py::TestSession::test_arguments_not_required",
"tests/unit/test_session.py::TestSession::test_available_profiles",
"tests/unit/test_session.py::TestSession::test_bad_resource_name",
"tests/unit/test_session.py::TestSession::test_bad_resource_name_with_no_client_has_simple_err_msg",
... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add get_partition_for_region to Session
Following up on boto/botocore#1715 to surface `get_partition_for_region` on the Boto3 session as well.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in boto3/session.py]
(definition of Session.get_partition_for_region:)
def get_partition_for_region(self, region_name):
"""Lists the partition name of a particular region.
:type region_name: string
:param region_name: Name of the region to list partition for (e.g.,
us-east-1).
:rtype: string
:return: Returns the respective partition name (e.g., aws)."""
[end of new definitions in boto3/session.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 196a2da7490a1a661a0103b8770bd31e34e147f2 | |
sympy__sympy-22347 | 22,347 | sympy/sympy | 1.10 | d6ab7d2e9ee8f25c2ed926648d2d3c8700dbaf0c | 2021-10-22T01:43:14Z | diff --git a/sympy/__init__.py b/sympy/__init__.py
index 8faadb93704d..cabb3f156725 100644
--- a/sympy/__init__.py
+++ b/sympy/__init__.py
@@ -110,7 +110,7 @@ def __sympy_debug():
from .functions import (factorial, factorial2, rf, ff, binomial,
RisingFactorial, FallingFactorial, subfactorial, carmichael,
fibonacci, lucas, motzkin, tribonacci, harmonic, bernoulli, bell, euler,
- catalan, genocchi, partition, sqrt, root, Min, Max, Id, real_root,
+ catalan, genocchi, partition, sqrt, root, Min, Max, Id, real_root, Rem,
cbrt, re, im, sign, Abs, conjugate, arg, polar_lift,
periodic_argument, unbranched_argument, principal_branch, transpose,
adjoint, polarify, unpolarify, sin, cos, tan, sec, csc, cot, sinc,
@@ -327,7 +327,7 @@ def __sympy_debug():
'factorial', 'factorial2', 'rf', 'ff', 'binomial', 'RisingFactorial',
'FallingFactorial', 'subfactorial', 'carmichael', 'fibonacci', 'lucas',
'motzkin', 'tribonacci', 'harmonic', 'bernoulli', 'bell', 'euler', 'catalan',
- 'genocchi', 'partition', 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root',
+ 'genocchi', 'partition', 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'Rem',
'cbrt', 're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
'transpose', 'adjoint', 'polarify', 'unpolarify', 'sin', 'cos', 'tan',
diff --git a/sympy/functions/__init__.py b/sympy/functions/__init__.py
index eb5ed382c25c..00b4f0d30663 100644
--- a/sympy/functions/__init__.py
+++ b/sympy/functions/__init__.py
@@ -10,7 +10,7 @@
from sympy.functions.combinatorial.numbers import (carmichael, fibonacci, lucas, tribonacci,
harmonic, bernoulli, bell, euler, catalan, genocchi, partition, motzkin)
from sympy.functions.elementary.miscellaneous import (sqrt, root, Min, Max,
- Id, real_root, cbrt)
+ Id, real_root, cbrt, Rem)
from sympy.functions.elementary.complexes import (re, im, sign, Abs,
conjugate, arg, polar_lift, periodic_argument, unbranched_argument,
principal_branch, transpose, adjoint, polarify, unpolarify)
@@ -55,7 +55,7 @@
'carmichael', 'fibonacci', 'lucas', 'motzkin', 'tribonacci', 'harmonic',
'bernoulli', 'bell', 'euler', 'catalan', 'genocchi', 'partition',
- 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'cbrt',
+ 'sqrt', 'root', 'Min', 'Max', 'Id', 'real_root', 'cbrt', 'Rem',
're', 'im', 'sign', 'Abs', 'conjugate', 'arg', 'polar_lift',
'periodic_argument', 'unbranched_argument', 'principal_branch',
diff --git a/sympy/functions/elementary/miscellaneous.py b/sympy/functions/elementary/miscellaneous.py
index 1516fa27a4bb..a7c694fbabca 100644
--- a/sympy/functions/elementary/miscellaneous.py
+++ b/sympy/functions/elementary/miscellaneous.py
@@ -1,4 +1,4 @@
-from sympy.core import Function, S, sympify
+from sympy.core import Function, S, sympify, NumberKind
from sympy.utilities.iterables import sift
from sympy.core.add import Add
from sympy.core.containers import Tuple
@@ -17,6 +17,7 @@
from sympy.core.rules import Transform
from sympy.core.logic import fuzzy_and, fuzzy_or, _torf
from sympy.core.traversal import walk
+from sympy.core.numbers import Integer
from sympy.logic.boolalg import And, Or
@@ -865,3 +866,61 @@ def _eval_is_nonnegative(self):
def _eval_is_negative(self):
return fuzzy_or(a.is_negative for a in self.args)
+
+
+class Rem(Function):
+ """Returns the remainder when ``p`` is divided by ``q`` where ``p`` is finite
+ and ``q`` is not equal to zero. The result, ``p - int(p/q)*q``, has the same sign
+ as the divisor.
+
+ Parameters
+ ==========
+
+ p : Expr
+ Dividend.
+
+ q : Expr
+ Divisor.
+
+ Notes
+ =====
+
+ ``Rem`` corresponds to the ``%`` operator in C.
+
+ Examples
+ ========
+
+ >>> from sympy.abc import x, y
+ >>> from sympy import Rem
+ >>> Rem(x**3, y)
+ Rem(x**3, y)
+ >>> Rem(x**3, y).subs({x: -5, y: 3})
+ -2
+
+ See Also
+ ========
+
+ Mod
+ """
+ kind = NumberKind
+
+ @classmethod
+ def eval(cls, p, q):
+ def doit(p, q):
+ """ the function remainder if both p,q are numbers
+ and q is not zero
+ """
+
+ if q.is_zero:
+ raise ZeroDivisionError("Division by zero")
+ if p is S.NaN or q is S.NaN or p.is_finite is False or q.is_finite is False:
+ return S.NaN
+ if p is S.Zero or p in (q, -q) or (p.is_integer and q == 1):
+ return S.Zero
+
+ if q.is_Number:
+ if p.is_Number:
+ return p - Integer(p/q)*q
+ rv = doit(p, q)
+ if rv is not None:
+ return rv
| diff --git a/sympy/core/tests/test_args.py b/sympy/core/tests/test_args.py
index e9af8c6fb99b..63b80276e236 100644
--- a/sympy/core/tests/test_args.py
+++ b/sympy/core/tests/test_args.py
@@ -2203,6 +2203,11 @@ def test_sympy__functions__elementary__miscellaneous__MinMaxBase():
pass
+def test_sympy__functions__elementary__miscellaneous__Rem():
+ from sympy.functions.elementary.miscellaneous import Rem
+ assert _test_args(Rem(x, 2))
+
+
def test_sympy__functions__elementary__piecewise__ExprCondPair():
from sympy.functions.elementary.piecewise import ExprCondPair
assert _test_args(ExprCondPair(1, True))
diff --git a/sympy/functions/elementary/tests/test_miscellaneous.py b/sympy/functions/elementary/tests/test_miscellaneous.py
index b2751cc5aa0f..c95fb0aa7870 100644
--- a/sympy/functions/elementary/tests/test_miscellaneous.py
+++ b/sympy/functions/elementary/tests/test_miscellaneous.py
@@ -10,7 +10,7 @@
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor, ceiling
from sympy.functions.elementary.miscellaneous import (sqrt, cbrt, root, Min,
- Max, real_root)
+ Max, real_root, Rem)
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.special.delta_functions import Heaviside
@@ -465,3 +465,12 @@ def test_issue_6899():
x = Symbol('x')
eqn = Lambda(x, x)
assert eqn.func(*eqn.args) == eqn
+
+def test_Rem():
+ from sympy.abc import x, y
+ assert Rem(5, 3) == 2
+ assert Rem(-5, 3) == -2
+ assert Rem(5, -3) == 2
+ assert Rem(-5, -3) == -2
+ assert Rem(x**3, y) == Rem(x**3, y)
+ assert Rem(Rem(-5, 3) + 3, 3) == 1
| [
{
"components": [
{
"doc": "Returns the remainder when ``p`` is divided by ``q`` where ``p`` is finite\nand ``q`` is not equal to zero. The result, ``p - int(p/q)*q``, has the same sign\nas the divisor.\n\nParameters\n==========\n\np : Expr\n Dividend.\n\nq : Expr\n Divisor.\n\nNotes\n=====\... | [
"test_sympy__functions__elementary__miscellaneous__Rem",
"test_Min",
"test_Max",
"test_minmax_assumptions",
"test_issue_8413",
"test_root",
"test_real_root",
"test_rewrite_MaxMin_as_Heaviside",
"test_rewrite_MaxMin_as_Piecewise",
"test_issue_11099",
"test_issue_12638",
"test_issue_21399",
"t... | [
"test_all_classes_are_tested",
"test_sympy__algebras__quaternion__Quaternion",
"test_sympy__assumptions__assume__AppliedPredicate",
"test_predicates",
"test_sympy__assumptions__assume__UndefinedPredicate",
"test_sympy__assumptions__relation__binrel__AppliedBinaryRelation",
"test_sympy__assumptions__wrap... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added Rem(ainder) Function
<!-- Your title above should be a short description of what
was changed. Do not include the issue number in the title. -->
#### References to other Issues or PRs
<!-- If this pull request fixes an issue, write "Fixes #NNNN" in that exact
format, e.g. "Fixes #1234" (see
https://tinyurl.com/auto-closing for more information). Also, please
write a comment on that issue linking back to this pull request once it is
open. -->
Fixes #22090
#### Brief description of what is fixed or changed
There was no function to find the remainder if one number was negative
#### Other comments
#### Release Notes
<!-- Write the release notes for this release below between the BEGIN and END
statements. The basic format is a bulleted list with the name of the subpackage
and the release note for this PR. For example:
* solvers
* Added a new solver for logarithmic equations.
* functions
* Fixed a bug with log of integers.
or if no release note(s) should be included use:
NO ENTRY
See https://github.com/sympy/sympy/wiki/Writing-Release-Notes for more
information on how to write release notes. The bot will check your release
notes automatically to see if they are formatted correctly. -->
<!-- BEGIN RELEASE NOTES -->
* functions
* Added a function `Rem` to find remainder with same sign as the dividend (`Mod` has same sign as divisor).
<!-- END RELEASE NOTES -->
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in sympy/functions/elementary/miscellaneous.py]
(definition of Rem:)
class Rem(Function):
"""Returns the remainder when ``p`` is divided by ``q`` where ``p`` is finite
and ``q`` is not equal to zero. The result, ``p - int(p/q)*q``, has the same sign
as the divisor.
Parameters
==========
p : Expr
Dividend.
q : Expr
Divisor.
Notes
=====
``Rem`` corresponds to the ``%`` operator in C.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import Rem
>>> Rem(x**3, y)
Rem(x**3, y)
>>> Rem(x**3, y).subs({x: -5, y: 3})
-2
See Also
========
Mod"""
(definition of Rem.eval:)
def eval(cls, p, q):
(definition of Rem.eval.doit:)
def doit(p, q):
"""the function remainder if both p,q are numbers
and q is not zero"""
[end of new definitions in sympy/functions/elementary/miscellaneous.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | Here is the discussion in the issues of the pull request.
<issues>
Add Rem(ainder) function?
Related to #22089
So far, `Mod(x, y)` has been printed in C as `x % y`. This is only correct if the sign of x and y are the same (or rather both being either non-negative or non-positive). Typically, if a language has support for both modulo and remainder, `Mod` has the same sign as the divisor/denominator and the remainder the same sign as the dividend/numerator, see this historic version of the Wikipedia page: https://en.wikipedia.org/w/index.php?title=Modulo_operation&oldid=764138354 The new version of the page use the terms floored (modulo) and truncated (remainder): https://en.wikipedia.org/wiki/Modulo_operation There is also a Euclidean modulo operation, where the result is always non-negative. The Python operation `%` corresponds to the modulo/floored version, as do SymPy's `Mod`.
It may make sense to add a `Rem` function to SymPy, if nothing else to simplify printing of these things (by rewriting to the version that is supported in the particular language) and to clarify the difference between these.
| x | y | mod(x, y) | rem(x, y) |
|--|--|--------|-----|
| 5 | 3 | 2 | 2|
| -5 | 3 | 1 | -2 |
| 5 | -3 | -1 | 2 |
| -5 | -3 | -2 | -2 |
To rewrite `Mod(x, y)` to `Rem`, one can use `Rem(Rem(x, y) + y, y)` (unless one know that x and y are both non-negative or non-positive, then it is just `Rem(x, y)`).
----------
Just to complicate things further: C99 defines a function called [remainder](https://en.cppreference.com/w/c/numeric/math/remainder), which for 5.0, 3.0 gives -1.0.
Fortran 77 has `mod` which is essentially "rem" here, and Fortran 90+ has `modulo` which is our "mod".
A suitable place for a designated class for code printing might be in `sympy.codegen.ast`.
It would be good to be able to distinguish the two, but I don't think we should use the names Mod and Remainder to do so. I don't think it will be obvious at all that they are different or which is which, and it's not a standard convention as far as I'm aware for the two to mean those things (see also [np.remainder](https://numpy.org/doc/stable/reference/generated/numpy.remainder.html)). I think it would be better to add a keyword argument to Mod to allow the different behaviors, which would either return separate FloorMod, TruncatedMod, etc. class or just be stored in the `args`. Or we could just add those classes directly in the codegen module.
Numpy made a bad choice there... Going against `math.remainder` and, what they even explicitly name, behaving as the modulus operator in Python.
Well, all language that has named two functions called something like mod and rem follows this convention. But, e.g. Fortran has mod and modulo, and many languages only support one of them.
I agree that it's weird when NumPy sometimes uses a completely different name for the function version of an operator for no apparent reason. They do this in many other places too.
But even so, I don't see this is a good way to differentiate these two concepts. Names like FloorMod and TruncatedMod are much clearer (and even those names could probably be improved).
--------------------
</issues> | 3e8695add7a25c8d70aeba7d6137496df02863fd | |
slackapi__python-slack-sdk-1130 | 1,130 | slackapi/python-slack-sdk | null | 77e906a2bd0ff3d45c65635476c625c0ed5be3a8 | 2021-10-21T01:40:02Z | diff --git a/slack_sdk/audit_logs/v1/logs.py b/slack_sdk/audit_logs/v1/logs.py
index b5119c919..214f8f077 100644
--- a/slack_sdk/audit_logs/v1/logs.py
+++ b/slack_sdk/audit_logs/v1/logs.py
@@ -102,6 +102,22 @@ def __init__(
self.unknown_fields = kwargs
+class ConversationPref:
+ type: Optional[List[str]]
+ user: Optional[List[str]]
+
+ def __init__(
+ self,
+ *,
+ type: Optional[List[str]] = None,
+ user: Optional[List[str]] = None,
+ **kwargs,
+ ) -> None:
+ self.type = type
+ self.user = user
+ self.unknown_fields = kwargs
+
+
class Details:
name: Optional[str]
new_value: Optional[Union[str, List[str], Dict[str, Any]]]
@@ -159,6 +175,8 @@ class Details:
is_token_rotation_enabled_app: Optional[bool]
old_retention_policy: Optional[RetentionPolicy]
new_retention_policy: Optional[RetentionPolicy]
+ who_can_post: Optional[ConversationPref]
+ can_thread: Optional[ConversationPref]
def __init__(
self,
@@ -218,6 +236,8 @@ def __init__(
is_token_rotation_enabled_app: Optional[bool] = None,
old_retention_policy: Optional[Union[Dict[str, Any], RetentionPolicy]] = None,
new_retention_policy: Optional[Union[Dict[str, Any], RetentionPolicy]] = None,
+ who_can_post: Optional[Union[Dict[str, List[str]], ConversationPref]] = None,
+ can_thread: Optional[Union[Dict[str, List[str]], ConversationPref]] = None,
**kwargs,
) -> None:
self.name = name
@@ -284,6 +304,16 @@ def __init__(
if isinstance(new_retention_policy, RetentionPolicy)
else RetentionPolicy(**new_retention_policy)
)
+ self.who_can_post = (
+ who_can_post
+ if isinstance(who_can_post, ConversationPref)
+ else ConversationPref(**who_can_post)
+ )
+ self.can_thread = (
+ can_thread
+ if isinstance(can_thread, ConversationPref)
+ else ConversationPref(**can_thread)
+ )
class App:
| diff --git a/tests/slack_sdk/audit_logs/test_response.py b/tests/slack_sdk/audit_logs/test_response.py
index 9a21d49e7..379455575 100644
--- a/tests/slack_sdk/audit_logs/test_response.py
+++ b/tests/slack_sdk/audit_logs/test_response.py
@@ -133,6 +133,10 @@ def test_logs_complete(self):
self.assertEqual(entry.details.new_retention_policy.type, "new")
self.assertEqual(entry.details.is_internal_integration, True)
self.assertEqual(entry.details.cleared_resolution, "approved")
+ self.assertEqual(entry.details.who_can_post.type, ["owner", "admin"])
+ self.assertEqual(entry.details.who_can_post.user, ["W111"])
+ self.assertEqual(entry.details.can_thread.type, ["admin", "org_admin"])
+ self.assertEqual(entry.details.can_thread.user, ["W222"])
logs_response_data = """{
@@ -335,7 +339,25 @@ def test_logs_complete(self):
"duration_days": 222
},
"is_internal_integration": true,
- "cleared_resolution": "approved"
+ "cleared_resolution": "approved",
+ "who_can_post": {
+ "type": [
+ "owner",
+ "admin"
+ ],
+ "user": [
+ "W111"
+ ]
+ },
+ "can_thread": {
+ "type": [
+ "admin",
+ "org_admin"
+ ],
+ "user": [
+ "W222"
+ ]
+ }
}
}
]
| [
{
"components": [
{
"doc": "",
"lines": [
105,
118
],
"name": "ConversationPref",
"signature": "class ConversationPref:",
"type": "class"
},
{
"doc": "",
"lines": [
109,
118
],
... | [
"tests/slack_sdk/audit_logs/test_response.py::TestAuditLogsClient::test_logs_complete"
] | [
"tests/slack_sdk/audit_logs/test_response.py::TestAuditLogsClient::test_logs"
] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Audit Logs API: Add new details properties for channel_posting_permissions_updated action
## Summary
This pull request adds two new properties to the Audit Logs API response data class. "who_can_post" and "can_thread" can be included in "channel_posting_permissions_updated" action response starting today. See also: https://github.com/slackapi/java-slack-sdk/commit/62f93c768d3bd3348dd1f4567dad86b660527fd4
### Category (place an `x` in each of the `[ ]`)
- [ ] **slack_sdk.web.WebClient (sync/async)** (Web API client)
- [ ] **slack_sdk.webhook.WebhookClient (sync/async)** (Incoming Webhook, response_url sender)
- [ ] **slack_sdk.socket_mode** (Socket Mode client)
- [ ] **slack_sdk.signature** (Request Signature Verifier)
- [ ] **slack_sdk.oauth** (OAuth Flow Utilities)
- [ ] **slack_sdk.models** (UI component builders)
- [ ] **slack_sdk.scim** (SCIM API client)
- [x] **slack_sdk.audit_logs** (Audit Logs API client)
- [ ] **slack_sdk.rtm_v2** (RTM client)
- [ ] `/docs-src` (Documents, have you run `./scripts/docs.sh`?)
- [ ] `/docs-src-v2` (Documents, have you run `./scripts/docs-v2.sh`?)
- [ ] `/tutorial` (PythOnBoardingBot tutorial)
- [x] `tests`/`integration_tests` (Automated tests for this library)
## Requirements (place an `x` in each `[ ]`)
- [x] I've read and understood the [Contributing Guidelines](https://github.com/slackapi/python-slack-sdk/blob/main/.github/contributing.md) and have done my best effort to follow them.
- [x] I've read and agree to the [Code of Conduct](https://slackhq.github.io/code-of-conduct).
- [x] I've run `python3 -m venv .venv && source .venv/bin/activate && ./scripts/run_validation.sh` after making the changes.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in slack_sdk/audit_logs/v1/logs.py]
(definition of ConversationPref:)
class ConversationPref:
(definition of ConversationPref.__init__:)
def __init__( self, *, type: Optional[List[str]] = None, user: Optional[List[str]] = None, **kwargs, ) -> None:
[end of new definitions in slack_sdk/audit_logs/v1/logs.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 2997a1786c4fd969b00ce69af888ebae8e8ebed0 | ||
Project-MONAI__MONAI-3138 | 3,138 | Project-MONAI/MONAI | null | 1d3a9d773886dce57afd4a3165fe0e44fc3a50d8 | 2021-10-15T14:06:30Z | diff --git a/monai/config/__init__.py b/monai/config/__init__.py
index e3f623823c..64fdd3d7f0 100644
--- a/monai/config/__init__.py
+++ b/monai/config/__init__.py
@@ -12,7 +12,9 @@
from .deviceconfig import (
USE_COMPILED,
IgniteInfo,
+ get_config_values,
get_gpu_info,
+ get_optional_config_values,
get_system_info,
print_config,
print_debug_info,
diff --git a/monai/data/__init__.py b/monai/data/__init__.py
index b12a307663..cbca16af37 100644
--- a/monai/data/__init__.py
+++ b/monai/data/__init__.py
@@ -43,6 +43,7 @@
from .synthetic import create_test_image_2d, create_test_image_3d
from .test_time_augmentation import TestTimeAugmentation
from .thread_buffer import ThreadBuffer, ThreadDataLoader
+from .torchscript_utils import load_net_with_metadata, save_net_with_metadata
from .utils import (
compute_importance_map,
compute_shape_offset,
diff --git a/monai/data/torchscript_utils.py b/monai/data/torchscript_utils.py
new file mode 100644
index 0000000000..17539ce1dc
--- /dev/null
+++ b/monai/data/torchscript_utils.py
@@ -0,0 +1,149 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+from typing import IO, Any, Mapping, Optional, Sequence, Tuple, Union
+
+import torch
+
+from monai.config import get_config_values
+from monai.utils import JITMetadataKeys
+from monai.utils.module import pytorch_after
+
+METADATA_FILENAME = "metadata.json"
+
+
+def save_net_with_metadata(
+ jit_obj: torch.nn.Module,
+ filename_prefix_or_stream: Union[str, IO[Any]],
+ include_config_vals: bool = True,
+ append_timestamp: bool = False,
+ meta_values: Optional[Mapping[str, Any]] = None,
+ more_extra_files: Optional[Mapping[str, bytes]] = None,
+) -> None:
+ """
+ Save the JIT object (script or trace produced object) `jit_obj` to the given file or stream with metadata
+ included as a JSON file. The Torchscript format is a zip file which can contain extra file data which is used
+ here as a mechanism for storing metadata about the network being saved. The data in `meta_values` should be
+ compatible with conversion to JSON using the standard library function `dumps`. The intent is this metadata will
+ include information about the network applicable to some use case, such as describing the input and output format,
+ a network name and version, a plain language description of what the network does, and other relevant scientific
+ information. Clients can use this information to determine automatically how to use the network, and users can
+ read what the network does and keep track of versions.
+
+ Examples::
+
+ net = torch.jit.script(monai.networks.nets.UNet(2, 1, 1, [8, 16], [2]))
+
+ meta = {
+ "name": "Test UNet",
+ "used_for": "demonstration purposes",
+ "input_dims": 2,
+ "output_dims": 2
+ }
+
+ # save the Torchscript bundle with the above dictionary stored as an extra file
+ save_net_with_metadata(m, "test", meta_values=meta)
+
+ # load the network back, `loaded_meta` has same data as `meta` plus version information
+ loaded_net, loaded_meta, _ = load_net_with_metadata("test.pt")
+
+
+ Args:
+ jit_obj: object to save, should be generated by `script` or `trace`.
+ filename_prefix_or_stream: filename or file-like stream object, if filename has no extension it becomes `.pt`.
+ include_config_vals: if True, MONAI, Pytorch, and Numpy versions are included in metadata.
+ append_timestamp: if True, a timestamp for "now" is appended to the file's name before the extension.
+ meta_values: metadata values to store with the object, not limited just to keys in `JITMetadataKeys`.
+ more_extra_files: other extra file data items to include in bundle, see `_extra_files` of `torch.jit.save`.
+ """
+
+ now = datetime.datetime.now()
+ metadict = {}
+
+ if include_config_vals:
+ metadict.update(get_config_values())
+ metadict[JITMetadataKeys.TIMESTAMP.value] = now.astimezone().isoformat()
+
+ if meta_values is not None:
+ metadict.update(meta_values)
+
+ json_data = json.dumps(metadict)
+
+ # Pytorch>1.6 can use dictionaries directly, otherwise need to use special map object
+ if pytorch_after(1, 7):
+ extra_files = {METADATA_FILENAME: json_data.encode()}
+
+ if more_extra_files is not None:
+ extra_files.update(more_extra_files)
+ else:
+ extra_files = torch._C.ExtraFilesMap() # type:ignore[attr-defined]
+ extra_files[METADATA_FILENAME] = json_data.encode()
+
+ if more_extra_files is not None:
+ for k, v in more_extra_files.items():
+ extra_files[k] = v
+
+ if isinstance(filename_prefix_or_stream, str):
+ filename_no_ext, ext = os.path.splitext(filename_prefix_or_stream)
+ if ext == "":
+ ext = ".pt"
+
+ if append_timestamp:
+ filename_prefix_or_stream = now.strftime(f"{filename_no_ext}_%Y%m%d%H%M%S{ext}")
+ else:
+ filename_prefix_or_stream = filename_no_ext + ext
+
+ torch.jit.save(jit_obj, filename_prefix_or_stream, extra_files)
+
+
+def load_net_with_metadata(
+ filename_prefix_or_stream: Union[str, IO[Any]],
+ map_location: Optional[torch.device] = None,
+ more_extra_files: Sequence[str] = (),
+) -> Tuple[torch.nn.Module, dict, dict]:
+ """
+ Load the module object from the given Torchscript filename or stream, and convert the stored JSON metadata
+ back to a dict object. This will produce an empty dict if the metadata file is not present.
+
+ Args:
+ filename_prefix_or_stream: filename or file-like stream object.
+ map_location: network map location as in `torch.jit.load`.
+ more_extra_files: other extra file data names to load from bundle, see `_extra_files` of `torch.jit.load`.
+ Returns:
+ Triple containing loaded object, metadata dict, and extra files dict containing other file data if present
+ """
+ # Pytorch>1.6 can use dictionaries directly, otherwise need to use special map object
+ if pytorch_after(1, 7):
+ extra_files = {f: "" for f in more_extra_files}
+ extra_files[METADATA_FILENAME] = ""
+ else:
+ extra_files = torch._C.ExtraFilesMap() # type:ignore[attr-defined]
+ extra_files[METADATA_FILENAME] = ""
+
+ for f in more_extra_files:
+ extra_files[f] = ""
+
+ jit_obj = torch.jit.load(filename_prefix_or_stream, map_location, extra_files)
+
+ extra_files = dict(extra_files.items()) # compatibility with ExtraFilesMap
+
+ if METADATA_FILENAME in extra_files:
+ json_data = extra_files[METADATA_FILENAME]
+ del extra_files[METADATA_FILENAME]
+ else:
+ json_data = "{}"
+
+ json_data_dict = json.loads(json_data)
+
+ return jit_obj, json_data_dict, extra_files
diff --git a/monai/utils/__init__.py b/monai/utils/__init__.py
index d58fcca32d..eeea64f0cd 100644
--- a/monai/utils/__init__.py
+++ b/monai/utils/__init__.py
@@ -24,6 +24,7 @@
GridSamplePadMode,
InterpolateMode,
InverseKeys,
+ JITMetadataKeys,
LossReduction,
Method,
MetricReduction,
diff --git a/monai/utils/enums.py b/monai/utils/enums.py
index c059a4a5e2..486a7b81ce 100644
--- a/monai/utils/enums.py
+++ b/monai/utils/enums.py
@@ -266,3 +266,15 @@ class TransformBackends(Enum):
TORCH = "torch"
NUMPY = "numpy"
+
+
+class JITMetadataKeys(Enum):
+ """
+ Keys stored in the metadata file for saved Torchscript models. Some of these are generated by the routines
+ and others are optionally provided by users.
+ """
+
+ NAME = "name"
+ TIMESTAMP = "timestamp"
+ VERSION = "version"
+ DESCRIPTION = "description"
| diff --git a/tests/test_torchscript_utils.py b/tests/test_torchscript_utils.py
new file mode 100644
index 0000000000..b9840f68f1
--- /dev/null
+++ b/tests/test_torchscript_utils.py
@@ -0,0 +1,112 @@
+# Copyright 2020 - 2021 MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+import unittest
+
+import torch
+
+from monai.config import get_config_values
+from monai.data import load_net_with_metadata, save_net_with_metadata
+from monai.utils import JITMetadataKeys
+from monai.utils.module import pytorch_after
+
+
+class TestModule(torch.nn.Module):
+ def forward(self, x):
+ return x + 10
+
+
+class TestTorchscript(unittest.TestCase):
+ def test_save_net_with_metadata(self):
+ """Save a network without metadata to a file."""
+ m = torch.jit.script(TestModule())
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ save_net_with_metadata(m, f"{tempdir}/test")
+
+ self.assertTrue(os.path.isfile(f"{tempdir}/test.pt"))
+
+ def test_save_net_with_metadata_ext(self):
+ """Save a network without metadata to a file."""
+ m = torch.jit.script(TestModule())
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ save_net_with_metadata(m, f"{tempdir}/test.zip")
+
+ self.assertTrue(os.path.isfile(f"{tempdir}/test.zip"))
+
+ def test_save_net_with_metadata_with_extra(self):
+ """Save a network with simple metadata to a file."""
+ m = torch.jit.script(TestModule())
+
+ test_metadata = {"foo": [1, 2], "bar": "string"}
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ save_net_with_metadata(m, f"{tempdir}/test", meta_values=test_metadata)
+
+ self.assertTrue(os.path.isfile(f"{tempdir}/test.pt"))
+
+ def test_load_net_with_metadata(self):
+ """Save then load a network with no metadata or other extra files."""
+ m = torch.jit.script(TestModule())
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ save_net_with_metadata(m, f"{tempdir}/test")
+ _, meta, extra_files = load_net_with_metadata(f"{tempdir}/test.pt")
+
+ del meta[JITMetadataKeys.TIMESTAMP.value] # no way of knowing precisely what this value would be
+
+ self.assertEqual(meta, get_config_values())
+ self.assertEqual(extra_files, {})
+
+ def test_load_net_with_metadata_with_extra(self):
+ """Save then load a network with basic metadata."""
+ m = torch.jit.script(TestModule())
+
+ test_metadata = {"foo": [1, 2], "bar": "string"}
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ save_net_with_metadata(m, f"{tempdir}/test", meta_values=test_metadata)
+ _, meta, extra_files = load_net_with_metadata(f"{tempdir}/test.pt")
+
+ del meta[JITMetadataKeys.TIMESTAMP.value] # no way of knowing precisely what this value would be
+
+ test_compare = get_config_values()
+ test_compare.update(test_metadata)
+
+ self.assertEqual(meta, test_compare)
+ self.assertEqual(extra_files, {})
+
+ def test_save_load_more_extra_files(self):
+ """Save then load extra file data from a torchscript file."""
+ m = torch.jit.script(TestModule())
+
+ test_metadata = {"foo": [1, 2], "bar": "string"}
+
+ more_extra_files = {"test.txt": b"This is test data"}
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ save_net_with_metadata(m, f"{tempdir}/test", meta_values=test_metadata, more_extra_files=more_extra_files)
+
+ self.assertTrue(os.path.isfile(f"{tempdir}/test.pt"))
+
+ _, _, loaded_extra_files = load_net_with_metadata(f"{tempdir}/test.pt", more_extra_files=("test.txt",))
+
+ if pytorch_after(1, 7):
+ self.assertEqual(more_extra_files["test.txt"], loaded_extra_files["test.txt"])
+ else:
+ self.assertEqual(more_extra_files["test.txt"].decode(), loaded_extra_files["test.txt"])
+
+
+if __name__ == "__main__":
+ unittest.main()
| [
{
"components": [
{
"doc": "Save the JIT object (script or trace produced object) `jit_obj` to the given file or stream with metadata\nincluded as a JSON file. The Torchscript format is a zip file which can contain extra file data which is used\nhere as a mechanism for storing metadata about the n... | [
"tests/test_torchscript_utils.py::TestTorchscript::test_load_net_with_metadata",
"tests/test_torchscript_utils.py::TestTorchscript::test_load_net_with_metadata_with_extra",
"tests/test_torchscript_utils.py::TestTorchscript::test_save_load_more_extra_files",
"tests/test_torchscript_utils.py::TestTorchscript::t... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Adding Torchscript utility functions
Signed-off-by: Eric Kerfoot <eric.kerfoot@kcl.ac.uk>
### Description
Adding metadata to saved Torchscript objects was dicussed as a useful feature with the Deploy team. The intent is to define saved networks that are self-describing, stating what their inputs and outputs are, what data requirements they, how to interpret their outputs, etc. Other information such as timestamps, versions, names, plain-language descriptions, and other important scientific descriptions can also be added. The mechanism works by saving a mapping of items as a JSON file in the Torchscript object, Pytorch provides the facilities to do this through the standard API calls so there's no need to manipulate the zip files directly.
### Status
**Work in progress**
### Types of changes
<!--- Put an `x` in all the boxes that apply, and remove the not applicable items -->
- [x] Non-breaking change (fix or new feature that would not break existing functionality).
- [ ] Breaking change (fix or new feature that would cause existing functionality to change).
- [x] New tests added to cover the changes.
- [ ] Integration tests passed locally by running `./runtests.sh -f -u --net --coverage`.
- [ ] Quick tests passed locally by running `./runtests.sh --quick --unittests`.
- [ ] In-line docstrings updated.
- [ ] Documentation updated, tested `make html` command in the `docs/` folder.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in monai/data/torchscript_utils.py]
(definition of save_net_with_metadata:)
def save_net_with_metadata( jit_obj: torch.nn.Module, filename_prefix_or_stream: Union[str, IO[Any]], include_config_vals: bool = True, append_timestamp: bool = False, meta_values: Optional[Mapping[str, Any]] = None, more_extra_files: Optional[Mapping[str, bytes]] = None, ) -> None:
"""Save the JIT object (script or trace produced object) `jit_obj` to the given file or stream with metadata
included as a JSON file. The Torchscript format is a zip file which can contain extra file data which is used
here as a mechanism for storing metadata about the network being saved. The data in `meta_values` should be
compatible with conversion to JSON using the standard library function `dumps`. The intent is this metadata will
include information about the network applicable to some use case, such as describing the input and output format,
a network name and version, a plain language description of what the network does, and other relevant scientific
information. Clients can use this information to determine automatically how to use the network, and users can
read what the network does and keep track of versions.
Examples::
net = torch.jit.script(monai.networks.nets.UNet(2, 1, 1, [8, 16], [2]))
meta = {
"name": "Test UNet",
"used_for": "demonstration purposes",
"input_dims": 2,
"output_dims": 2
}
# save the Torchscript bundle with the above dictionary stored as an extra file
save_net_with_metadata(m, "test", meta_values=meta)
# load the network back, `loaded_meta` has same data as `meta` plus version information
loaded_net, loaded_meta, _ = load_net_with_metadata("test.pt")
Args:
jit_obj: object to save, should be generated by `script` or `trace`.
filename_prefix_or_stream: filename or file-like stream object, if filename has no extension it becomes `.pt`.
include_config_vals: if True, MONAI, Pytorch, and Numpy versions are included in metadata.
append_timestamp: if True, a timestamp for "now" is appended to the file's name before the extension.
meta_values: metadata values to store with the object, not limited just to keys in `JITMetadataKeys`.
more_extra_files: other extra file data items to include in bundle, see `_extra_files` of `torch.jit.save`."""
(definition of load_net_with_metadata:)
def load_net_with_metadata( filename_prefix_or_stream: Union[str, IO[Any]], map_location: Optional[torch.device] = None, more_extra_files: Sequence[str] = (), ) -> Tuple[torch.nn.Module, dict, dict]:
"""Load the module object from the given Torchscript filename or stream, and convert the stored JSON metadata
back to a dict object. This will produce an empty dict if the metadata file is not present.
Args:
filename_prefix_or_stream: filename or file-like stream object.
map_location: network map location as in `torch.jit.load`.
more_extra_files: other extra file data names to load from bundle, see `_extra_files` of `torch.jit.load`.
Returns:
Triple containing loaded object, metadata dict, and extra files dict containing other file data if present"""
[end of new definitions in monai/data/torchscript_utils.py]
[start of new definitions in monai/utils/enums.py]
(definition of JITMetadataKeys:)
class JITMetadataKeys(Enum):
"""Keys stored in the metadata file for saved Torchscript models. Some of these are generated by the routines
and others are optionally provided by users."""
[end of new definitions in monai/utils/enums.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | e73257caa79309dcce1e93abf1632f4bfd75b11f | ||
joke2k__faker-1548 | 1,548 | joke2k/faker | null | 2c857e1fb64d0797bf352995815dbca56de23b8f | 2021-10-11T16:09:22Z | diff --git a/faker/providers/automotive/nl_NL/__init__.py b/faker/providers/automotive/nl_NL/__init__.py
new file mode 100644
index 0000000000..3f54070934
--- /dev/null
+++ b/faker/providers/automotive/nl_NL/__init__.py
@@ -0,0 +1,79 @@
+import re
+import string
+
+from .. import Provider as AutomotiveProvider
+
+
+class Provider(AutomotiveProvider):
+ """Implement automotive provider for `nl_NL` locale.
+
+ Sources:
+ - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_Netherlands
+ - https://www.cbs.nl/en-gb/figures/detail/82044eng
+
+ .. |license_plate_car| replace::
+ :meth:`license_plate_car() <faker.providers.automotive.nl_NL.Provider.license_plate_car>`
+
+ .. |license_plate_motorbike| replace::
+ :meth:`license_plate_motorbike() <faker.providers.automotive.nl_NL.Provider.license_plate_motorbike>`
+ """
+
+ # License formats for cars / other vehicles than motorbikes
+ license_formats = (
+ # Format 6
+ "##-%?-??",
+
+ # Format 7
+ "##-%??-#",
+
+ # Format 8
+ "#-@??-##",
+
+ # Format 9
+ "%?-###-?",
+
+ # Format 10
+ "%-###-??",
+ )
+
+ # License formats for motorbikes.
+ # According to CBS, approximately 10% of road vehicles in the Netherlands are motorbikes
+ license_formats_motorbike = (
+ "M?-??-##",
+ "##-M?-??",
+ )
+
+ # Base first letters of format
+ license_plate_prefix_letters = "BDFGHJKLNPRSTVXZ"
+
+ # For Format 8 (9-XXX-99) "BDFGHJLNPR" are not used,
+ # as to not clash with former export license plates
+ license_plate_prefix_letters_format_8 = "KSTVXZ"
+
+ def license_plate_motorbike(self) -> str:
+ """Generate a license plate for motorbikes."""
+ return self.bothify(self.random_element(self.license_formats_motorbike),
+ letters=string.ascii_uppercase)
+
+ def license_plate_car(self) -> str:
+ """Generate a license plate for cars."""
+ # Replace % with license_plate_prefix_letters
+ temp = re.sub(r"\%",
+ self.random_element(self.license_plate_prefix_letters),
+ self.random_element(self.license_formats))
+
+ # Replace @ with license_plate_prefix_letters_format_8
+ temp = re.sub(r"\@",
+ self.random_element(self.license_plate_prefix_letters_format_8),
+ temp)
+
+ return self.bothify(temp, letters=string.ascii_uppercase)
+
+ def license_plate(self) -> str:
+ """Generate a license plate.
+ This method randomly chooses 10% between |license_plate_motorbike|
+ or 90% |license_plate_car| to generate the result.
+ """
+ if self.generator.random.random() < 0.1:
+ return self.license_plate_motorbike()
+ return self.license_plate_car()
| diff --git a/tests/providers/test_automotive.py b/tests/providers/test_automotive.py
index bda4b6268a..083ddf268f 100644
--- a/tests/providers/test_automotive.py
+++ b/tests/providers/test_automotive.py
@@ -217,3 +217,35 @@ class TestElGr(_SimpleAutomotiveTestMixin):
"""Test el_GR automotive provider methods"""
license_plate_pattern = re.compile(r'^(?P<prefix>[A-Z]{2,3}) \d{4}$')
+
+
+class TestNlNl(_SimpleAutomotiveTestMixin):
+ """Test nl_NL automotive provider methods"""
+ license_plate_car_pattern = re.compile(
+ r'\d{2}-[BDFGHJKLNPRSTVXZ][A-Z]-[A-Z]{2}|'
+ r'\d{2}-[BDFGHJKLNPRSTVXZ][A-Z]{2}-\d|'
+ r'\d-[KSTVXZ][A-Z]{2}-\d{2}|'
+ r'[BDFGHJKLNPRSTVXZ][A-Z]-\d{3}-[A-Z]|'
+ r'[BDFGHJKLNPRSTVXZ]-\d{3}-[A-Z]{2}',
+ )
+
+ license_plate_motorbike_pattern = re.compile(
+ r'M[A-Z]-[A-Z]{2}-\d{2}|'
+ r'\d{2}-M[A-Z]-[A-Z]{2}',
+ )
+
+ license_plate_pattern = re.compile(
+ license_plate_car_pattern.pattern + '|' + license_plate_motorbike_pattern.pattern,
+ )
+
+ def test_plate_car(self, faker, num_samples):
+ for _ in range(num_samples):
+ plate = faker.license_plate_car()
+ assert isinstance(plate, str)
+ assert self.license_plate_car_pattern.match(plate)
+
+ def test_plate_motorbike(self, faker, num_samples):
+ for _ in range(num_samples):
+ plate = faker.license_plate_motorbike()
+ assert isinstance(plate, str)
+ assert self.license_plate_motorbike_pattern.match(plate)
| [
{
"components": [
{
"doc": "Implement automotive provider for `nl_NL` locale.\n\nSources:\n- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_Netherlands\n- https://www.cbs.nl/en-gb/figures/detail/82044eng\n\n.. |license_plate_car| replace::\n :meth:`license_plate_car() <faker.pr... | [
"tests/providers/test_automotive.py::TestNlNl::test_license_plate",
"tests/providers/test_automotive.py::TestNlNl::test_plate_car",
"tests/providers/test_automotive.py::TestNlNl::test_plate_motorbike"
] | [
"tests/providers/test_automotive.py::TestSkSk::test_license_plate",
"tests/providers/test_automotive.py::TestPtBr::test_license_plate",
"tests/providers/test_automotive.py::TestPtPt::test_license_plate",
"tests/providers/test_automotive.py::TestHeIl::test_license_plate",
"tests/providers/test_automotive.py:... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Added automotive provider for nl_NL
### What does this change?
Added an automotive provider for Dutch (`nl_NL`), which generates license plates for cars, motorbikes and other road vehicles according to https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_Netherlands (format 6 through 10, covering 1999 to present).
### What was wrong
`faker` dit not include license plate formats for the `nl_NL` locale.
### How this fixes it
- Added an `nl_NL` folder in `faker/providers/automotive/`
- Added an `__init__.py` file in the `nl_NL` folder, with the same format as `faker/providers/date_time/es_ES/__init__.py`
- Added license plates for motorbikes (`license_plate_motorbike()`), cars and other vehicles (`license_plate_motorbike()`), and override `license_plate()` to generate 10% motorbike plates and 90% other road vehicle plates (according to [official CBS statistics](https://www.cbs.nl/en-gb/figures/detail/82044eng) approx. 10% of road vehicles are motorbikes)
- Added test cases for `license_plate()`, `license_plate_motorbike()` and `license_plate_car()`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/automotive/nl_NL/__init__.py]
(definition of Provider:)
class Provider(AutomotiveProvider):
"""Implement automotive provider for `nl_NL` locale.
Sources:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_Netherlands
- https://www.cbs.nl/en-gb/figures/detail/82044eng
.. |license_plate_car| replace::
:meth:`license_plate_car() <faker.providers.automotive.nl_NL.Provider.license_plate_car>`
.. |license_plate_motorbike| replace::
:meth:`license_plate_motorbike() <faker.providers.automotive.nl_NL.Provider.license_plate_motorbike>`"""
(definition of Provider.license_plate_motorbike:)
def license_plate_motorbike(self) -> str:
"""Generate a license plate for motorbikes."""
(definition of Provider.license_plate_car:)
def license_plate_car(self) -> str:
"""Generate a license plate for cars."""
(definition of Provider.license_plate:)
def license_plate(self) -> str:
"""Generate a license plate.
This method randomly chooses 10% between |license_plate_motorbike|
or 90% |license_plate_car| to generate the result."""
[end of new definitions in faker/providers/automotive/nl_NL/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
joke2k__faker-1544 | 1,544 | joke2k/faker | null | 3818045332f4cb2911e5ac18f69e385bf0c51af0 | 2021-10-10T09:59:09Z | diff --git a/faker/providers/currency/el_GR/__init__.py b/faker/providers/currency/el_GR/__init__.py
new file mode 100644
index 0000000000..bfa2e40334
--- /dev/null
+++ b/faker/providers/currency/el_GR/__init__.py
@@ -0,0 +1,161 @@
+from .. import Provider as CurrencyProvider
+
+
+class Provider(CurrencyProvider):
+ # Source https://el.wikipedia.org/wiki/Κατάλογος_νομισμάτων_των_χωρών_του_κόσμου
+ # Format: (code, name)
+ currencies = (
+ ("AED", "Ντιρχάμ των Ηνωμένων Αραβικών Εμιράτων"),
+ ("AFN", "Αφγάνι"),
+ ("ALL", "Λεκ"),
+ ("AMD", "Ντραμ"),
+ ("AOA", "Κουάνζα"),
+ ("ARS", "Πέσο Αργεντινής"),
+ ("AUD", "Δολάριο Αυστραλίας"),
+ ("AZN", "Μανάτ του Αζερμπαϊτζάν"),
+ ("BAM", "Μετατρέψιμο μάρκο Βοσνίας και Ερζεγοβίνης"),
+ ("BBD", "Δολάριο των Μπαρμπάντος"),
+ ("BDT", "Τάκα"),
+ ("BGN", "Λεβ"),
+ ("BHD", "Δηνάριο του Μπαχρέιν"),
+ ("BIF", "Φράγκο του Μπουρούντι"),
+ ("BND", "Κυάτ Μιανμάρ"),
+ ("BOB", "Μπολιβιάνο"),
+ ("BRL", "Ρεάλ Βραζιλίας"),
+ ("BSD", "Δολάριο Μπαχάμας"),
+ ("BTN", "Νγκούλντρουμ"),
+ ("BWP", "Πούλα"),
+ ("BYΝ", "Ρούβλι Λευκορωσίας"),
+ ("BZD", "Δολάριο Μπελίζ"),
+ ("CAD", "Δολάριο Καναδά"),
+ ("CDF", "Φράγκο του Κονγκό"),
+ ("CHF", "Ελβετικό Φράγκο"),
+ ("CLP", "Πέσο Χιλής"),
+ ("CNY", "Γιουάν |"),
+ ("COP", "Πέσο Κολομβίας"),
+ ("CRC", "Κολόν"),
+ ("CSD", "Δηνάριο Σερβίας"),
+ ("CUC", "Μετατρέψιμο πέσο Κούβας"),
+ ("CUP", "Πέσος Κούβας"),
+ ("CVE", "Εσκούδο Πρασίνου Ακρωτηρίου"),
+ ("CZK", "Κορόνα Τσεχίας (koruna)"),
+ ("DJF", "Φράγκο του Τζιμπουτί"),
+ ("DKK", "Κορόνα Δανίας"),
+ ("DOP", "Πέσο Δομινικανής Δημοκρατίας"),
+ ("DZD", "Δηνάριο της Αλγερίας"),
+ ("EGP", "Λίρα Αιγύπτου"),
+ ("ERN", "Νάκφα"),
+ ("ETB", "Μπιρ"),
+ ("EUR", "Ευρώ"),
+ ("FJD", "Δολάριο Νησιών Φίτζι"),
+ ("GBP", "Στερλίνα"),
+ ("GEL", "Λάρι"),
+ ("GHC", "Σέντι της Γκάνας"),
+ ("GMD", "Νταλάζι (Dalasi)"),
+ ("GNF", "Φράγκο Γουινέας"),
+ ("GTQ", "Κετσάλ"),
+ ("GYD", "Δολάριο Γουιάνας"),
+ ("HNL", "Λεμπίρα"),
+ ("HRK", "Κούνα"),
+ ("HTG", "Γκουρντ"),
+ ("HUF", "Φιορίνι Ουγγαρίας"),
+ ("IDR", "Ρουπία Ινδονησίας"),
+ ("ILS", "Νέο σέκελ"),
+ ("INR", "Ρουπία Ινδίας[6]"),
+ ("IQD", "Δηνάριο του Ιράκ"),
+ ("IRR", "Ριάλ του Ιράν"),
+ ("ISK", "Κορόνα Ισλανδίας (króna)"),
+ ("JMD", "Δολάριο Τζαμάικας"),
+ ("JOD", "Ιορδανικό δηνάριο"),
+ ("JPY", "Γιέν"),
+ ("KES", "Σελίνι Κένυας"),
+ ("KGS", "Σομ της Κιργιζίας"),
+ ("KHR", "Ριέλ Καμπότζης"),
+ ("KMF", "Φράγκο Κομόρων"),
+ ("KPW", "Γουόν Βόρειας Κορέας"),
+ ("KRW", "Γουόν Νότιας Κορέας"),
+ ("KWD", "Δηνάριο του Κουβέιτ"),
+ ("KZT", "Τένγκε"),
+ ("LAK", "Κιπ"),
+ ("LBP", "Λίρα Λιβάνου"),
+ ("LKR", "Ρουπία της Σρι Λάνκας (rupee)"),
+ ("LRD", "Δολάριο Λιβερίας"),
+ ("LSL", "Λότι"),
+ ("LYD", "Δηνάριο Λιβύης"),
+ ("MAD", "Ντιρχάμ Μαρόκου"),
+ ("MDL", "Μολδαβικό Λέου"),
+ ("MGA", "Αριάρι[10]"),
+ ("MKD", "Δηνάριο Βόρειας Μακεδονίας"),
+ ("MNT", "Τουγκρίκ"),
+ ("MRU", "Ουγκίγια[10]"),
+ ("MUR", "Ρουπία Μαυρίκιου"),
+ ("MVR", "Ρουφίγια"),
+ ("MWK", "Κουάτσα του Μαλάουι"),
+ ("MXN", "Πέσο Μεξικού"),
+ ("MYR", "Ρινγκίτ"),
+ ("MZN", "Μετικάλ"),
+ ("NAD", "Δολάριο Ναμίμπιας"),
+ ("NGN", "Νάιρα"),
+ ("NIO", "Χρυσό κόρντομπα της Νικαράγουας"),
+ ("NOK", "Κορόνα Νορβηγίας (krone)"),
+ ("NPR", "Ρουπία του Νεπάλ (rupee)"),
+ ("NZD", "Δολάριο Νέας Ζηλανδίας"),
+ ("OMR", "Ριάλ του Ομάν"),
+ ("PAB", "Μπαλμπόα Παναμά"),
+ ("PEK", "ΠΕΚΕΡΟΝ"),
+ ("PEN", "Σολ Περού (sol)"),
+ ("PGK", "Κίνα Παπούα-Νέας Γουινέας"),
+ ("PHP", "Πέσο Φιλιππίνων"),
+ ("PKR", "Ρουπία του Πακιστάν (rupee)"),
+ ("PLN", "Ζλότι"),
+ ("PYG", "Γκουαρανί"),
+ ("QAR", "Ριγιάλ του Κατάρ"),
+ ("RON", "Λέου Ρουμανίας"),
+ ("RUB", "Ρούβλι Ρωσίας"),
+ ("RWF", "Φράγκο της Ρουάντα"),
+ ("SAR", "Ριάλ Σαουδικής Αραβίας (riyal)"),
+ ("SBD", "Δολάριο των Νήσων του Σολομώντα"),
+ ("SCR", "Ρουπία των Σεϋχελλών (Seychellois rupee)"),
+ ("SDG", "Λίρα του Σουδάν"),
+ ("SEK", "Κορόνα Σουηδίας (krona)"),
+ ("SGD", "Δολάριο Σιγκαπούρης"),
+ ("SLL", "Λεόνε της Σιέρα Λεόνε"),
+ ("SOS", "Σελίνι Σομαλίας"),
+ ("SRD", "Δολάριο του Σουρινάμ"),
+ ("SSP", "Λίρα Νοτίου Σουδάν"),
+ ("STN", "Ντόμπρα"),
+ ("SYP", "Λίρα Συρίας"),
+ ("SZL", "Λιλανγκένι"),
+ ("THB", "Μπαχτ"),
+ ("TJS", "Σομόνι"),
+ ("TMM", "Μανάτ του Τουρκμενιστάν"),
+ ("TND", "Δηνάριο Τυνησίας"),
+ ("TOP", "Παάνγκα"),
+ ("TRY", "Τουρκική Λίρα"),
+ ("TTD", "Δολάριο Τρινιντάντ και Τομπάγκο"),
+ ("TZS", "Σελίνι Τανζανίας (shilling)"),
+ ("UAH", "Γρίβνα Ουκρανίας"),
+ ("UGX", "Σελίνι Ουγκάντας"),
+ ("USD", "Δολάριο ΗΠΑ"),
+ ("UYU", "Πέσο Ουρουγουάης"),
+ ("UZS", "Σομ του Ουζμπεκιστάν"),
+ ("VES", "Μπολίβαρ Σομπεράνο"),
+ ("VND", "Ντονγκ"),
+ ("VUV", "Βάτου"),
+ ("WST", "Τάλα Σαμόα"),
+ ("XAF", "Φράγκο CFA Κεντρικής Αφρικής"),
+ ("XCD", "Δολάριο Ανατολικής Καραϊβικής"),
+ ("XOF", "Φράγκο CFA Δυτικής Αφρικής"),
+ ("YER", "Ριάλ Υεμένης"),
+ ("ZAR", "Ραντ Νότιας Αφρικής"),
+ ("ZMK", "Κουάτσα της Ζάμπιας"),
+ ("ZWD", "RTGS Dollar"),
+ )
+
+ price_formats = ["#,##", "%#,##", "%##,##", "%.###,##", "%#.###,##"]
+
+ def pricetag(self) -> str:
+ return (
+ self.numerify(self.random_element(self.price_formats))
+ + "\N{no-break space}\N{euro sign}"
+ )
| diff --git a/tests/providers/test_currency.py b/tests/providers/test_currency.py
index 9f16c06f95..0238ce1b6c 100644
--- a/tests/providers/test_currency.py
+++ b/tests/providers/test_currency.py
@@ -378,3 +378,19 @@ def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
+
+
+class TestElGr:
+ """Test nl_NL currency provider"""
+
+ num_samples = 100
+
+ @classmethod
+ def setup_class(cls):
+ from faker.providers.currency.el_GR import Provider as ElGrCurrencyProvider
+ cls.provider = ElGrCurrencyProvider
+
+ def test_pricetag(self, faker, num_samples):
+ for _ in range(num_samples):
+ pricetag = faker.pricetag()
+ assert isinstance(pricetag, str)
| [
{
"components": [
{
"doc": "",
"lines": [
4,
160
],
"name": "Provider",
"signature": "class Provider(CurrencyProvider):",
"type": "class"
},
{
"doc": "",
"lines": [
157,
160
],
... | [
"tests/providers/test_currency.py::TestElGr::test_pricetag"
] | [
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency",
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency_code",
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency_name",
"tests/providers/test_currency.py::TestCurrencyProvider::test_currency_symbol_no_... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `currency` provider for `el_GR`
### What does this changes
Add `currency` provider for `el_GR`
### What was wrong
Provider did not exist
### How this fixes it
Add `currency` provider for `el_GR`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/currency/el_GR/__init__.py]
(definition of Provider:)
class Provider(CurrencyProvider):
(definition of Provider.pricetag:)
def pricetag(self) -> str:
[end of new definitions in faker/providers/currency/el_GR/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
joke2k__faker-1543 | 1,543 | joke2k/faker | null | 3818045332f4cb2911e5ac18f69e385bf0c51af0 | 2021-10-10T09:18:21Z | diff --git a/faker/providers/automotive/el_GR/__init__.py b/faker/providers/automotive/el_GR/__init__.py
new file mode 100644
index 0000000000..ebf998bb7b
--- /dev/null
+++ b/faker/providers/automotive/el_GR/__init__.py
@@ -0,0 +1,22 @@
+import re
+
+from .. import Provider as AutomotiveProvider
+
+
+class Provider(AutomotiveProvider):
+ """Implement automotive provider for ``el_GR`` locale.
+ """
+
+ uppercase_letters = 'ABEZHIKMNOPTYX'
+
+ license_formats = (
+ '??? ####',
+ '?? ####',
+ )
+
+ def license_plate(self) -> str:
+ """Generate a license plate."""
+ temp = re.sub(r'\?',
+ lambda x: self.random_element(self.uppercase_letters),
+ self.random_element(self.license_formats))
+ return self.numerify(temp)
| diff --git a/tests/providers/test_automotive.py b/tests/providers/test_automotive.py
index d64ed4c0de..bda4b6268a 100644
--- a/tests/providers/test_automotive.py
+++ b/tests/providers/test_automotive.py
@@ -211,3 +211,9 @@ class TestRoRo(_SimpleAutomotiveTestMixin):
def perform_extra_checks(self, license_plate, match):
assert match.group('prefix') in RoRoAutomotiveProvider.license_plate_prefix
+
+
+class TestElGr(_SimpleAutomotiveTestMixin):
+ """Test el_GR automotive provider methods"""
+
+ license_plate_pattern = re.compile(r'^(?P<prefix>[A-Z]{2,3}) \d{4}$')
| [
{
"components": [
{
"doc": "Implement automotive provider for ``el_GR`` locale.\n ",
"lines": [
6,
22
],
"name": "Provider",
"signature": "class Provider(AutomotiveProvider):",
"type": "class"
},
{
"doc": "Generate ... | [
"tests/providers/test_automotive.py::TestElGr::test_license_plate"
] | [
"tests/providers/test_automotive.py::TestSkSk::test_license_plate",
"tests/providers/test_automotive.py::TestPtBr::test_license_plate",
"tests/providers/test_automotive.py::TestPtPt::test_license_plate",
"tests/providers/test_automotive.py::TestHeIl::test_license_plate",
"tests/providers/test_automotive.py:... | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `automotive` provider for `el_GR`
### What does this changes
Add `automotive` provider for `el_GR`
### What was wrong
Provider was missing
### How this fixes it
Add `automotive` provider for `el_GR`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/automotive/el_GR/__init__.py]
(definition of Provider:)
class Provider(AutomotiveProvider):
"""Implement automotive provider for ``el_GR`` locale.
"""
(definition of Provider.license_plate:)
def license_plate(self) -> str:
"""Generate a license plate."""
[end of new definitions in faker/providers/automotive/el_GR/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
joke2k__faker-1542 | 1,542 | joke2k/faker | null | 20a18910bb37f7d8db4080deb2671b1c3a4b31f7 | 2021-10-10T07:34:13Z | diff --git a/README.rst b/README.rst
index fe10cbecc2..4809827141 100644
--- a/README.rst
+++ b/README.rst
@@ -260,6 +260,31 @@ How to create a Provider
fake.foo()
# 'bar'
+
+How to create a Dynamic Provider
+------------------------
+
+Dynamic providers can read elements from an external source.
+
+.. code:: python
+
+ from faker import Faker
+ from faker.providers import DynamicProvider
+
+ medical_professions_provider = DynamicProvider(
+ provider_name="medical_profession",
+ elements=["dr.", "doctor", "nurse", "surgeon", "clerk"],
+ )
+
+ fake = Faker()
+
+ # then add new provider to faker instance
+ fake.add_provider(medical_professions_provider)
+
+ # now you can use:
+ fake.medical_profession()
+ # 'dr.'
+
How to customize the Lorem Provider
-----------------------------------
diff --git a/faker/providers/__init__.py b/faker/providers/__init__.py
index 62e04d1a52..715b220dda 100644
--- a/faker/providers/__init__.py
+++ b/faker/providers/__init__.py
@@ -2,8 +2,9 @@
import string
from collections import OrderedDict
-from typing import Any, Dict, KeysView, Optional, Sequence, TypeVar, Union
+from typing import Any, Dict, KeysView, List, Optional, Sequence, TypeVar, Union
+from ..generator import Generator
from ..utils.distribution import choices_distribution, choices_distribution_unique
_re_hash = re.compile(r'#')
@@ -86,6 +87,10 @@ class BaseProvider:
}
def __init__(self, generator: Any) -> None:
+ """
+ Base class for fake data providers
+ :param generator: `Generator` instance
+ """
self.generator = generator
def locale(self) -> str:
@@ -488,3 +493,60 @@ def hexify(self, text: str = '^^^^', upper: bool = False) -> str:
if upper:
letters = letters.upper()
return _re_cir.sub(lambda x: self.random_element(letters), text)
+
+
+class DynamicProvider(BaseProvider):
+ def __init__(
+ self,
+ provider_name: str,
+ elements: Optional[List] = None,
+ generator: Optional[Any] = None,
+ ):
+ """
+ A faker Provider capable of getting a list of elements to randomly select from,
+ instead of using the predefined list of elements which exist in the default providers in faker.
+
+ :param provider_name: Name of provider, which would translate into the function name e.g. faker.my_fun().
+ :param elements: List of values to randomly select from
+ :param generator: Generator object. If missing, the default Generator is used.
+
+ :example:
+ >>>from faker import Faker
+ >>>from faker.providers import DynamicProvider
+
+ >>>medical_professions_provider = DynamicProvider(
+ >>> provider_name="medical_profession",
+ >>> elements=["dr.", "doctor", "nurse", "surgeon", "clerk"],
+ >>>)
+ >>>fake = Faker()
+ >>>fake.add_provider(medical_professions_provider)
+
+ >>>fake.medical_profession()
+ "dr."
+
+ """
+
+ if not generator:
+ generator = Generator()
+ super().__init__(generator)
+ if provider_name.startswith("__"):
+ raise ValueError("Provider name cannot start with __ as it would be ignored by Faker")
+
+ self.provider_name = provider_name
+
+ self.elements = []
+ if elements:
+ self.elements = elements
+
+ setattr(self, provider_name, self.get_random_value) # Add a method for the provider_name value
+
+ def add_element(self, element: str) -> None:
+ """Add new element."""
+ self.elements.append(element)
+
+ def get_random_value(self):
+
+ if not self.elements or len(self.elements) == 0:
+ raise ValueError("Elements should be a list of values the provider samples from")
+
+ return self.random_element(self.elements)
| diff --git a/tests/providers/test_dynamic.py b/tests/providers/test_dynamic.py
new file mode 100644
index 0000000000..7db878ead3
--- /dev/null
+++ b/tests/providers/test_dynamic.py
@@ -0,0 +1,72 @@
+import pytest
+
+from faker import Faker
+from faker.providers import DynamicProvider
+
+
+class TestDynamicProvider:
+ def test_without_dynamic(self):
+ faker = Faker()
+ with pytest.raises(
+ AttributeError,
+ match="'Generator' object has no attribute 'medical_profession'",
+ ):
+ faker.medical_profession()
+
+ def test_with_dynamic(self):
+ faker = Faker()
+ elements = ["dr.", "doctor", "nurse", "surgeon", "clerk"]
+ provider_name = "medical_profession"
+
+ medical_professions_provider = DynamicProvider(
+ provider_name=provider_name,
+ elements=elements,
+ )
+
+ faker.add_provider(medical_professions_provider)
+
+ assert faker.medical_profession() in elements
+
+ def test_dynamic_with_special_provider_name(self):
+ elements = ["dr.", "doctor", "nurse", "surgeon", "clerk"]
+ provider_name = "__special__" # The provider name cannot start with __
+
+ with pytest.raises(
+ ValueError,
+ match="Provider name cannot start with __ as it would be ignored by Faker",
+ ):
+ DynamicProvider(
+ provider_name=provider_name,
+ elements=elements,
+ )
+
+ def test_dynamic_with_empty_elements(self):
+ elements = []
+ provider_name = "my_provider"
+ provider = DynamicProvider(
+ provider_name=provider_name,
+ elements=elements,
+ )
+ faker = Faker()
+ faker.add_provider(provider)
+
+ with pytest.raises(
+ ValueError,
+ match="Elements should be a list of values the provider samples from",
+ ):
+ faker.my_provider()
+
+ def test_dynamic_add_element(self):
+ elements = []
+ provider_name = "my_provider"
+ provider = DynamicProvider(
+ provider_name=provider_name,
+ elements=elements,
+ )
+ faker = Faker()
+ faker.add_provider(provider)
+
+ provider.add_element("one")
+ provider.add_element("two")
+
+ assert faker.my_provider() in ("one", "two")
| diff --git a/README.rst b/README.rst
index fe10cbecc2..4809827141 100644
--- a/README.rst
+++ b/README.rst
@@ -260,6 +260,31 @@ How to create a Provider
fake.foo()
# 'bar'
+
+How to create a Dynamic Provider
+------------------------
+
+Dynamic providers can read elements from an external source.
+
+.. code:: python
+
+ from faker import Faker
+ from faker.providers import DynamicProvider
+
+ medical_professions_provider = DynamicProvider(
+ provider_name="medical_profession",
+ elements=["dr.", "doctor", "nurse", "surgeon", "clerk"],
+ )
+
+ fake = Faker()
+
+ # then add new provider to faker instance
+ fake.add_provider(medical_professions_provider)
+
+ # now you can use:
+ fake.medical_profession()
+ # 'dr.'
+
How to customize the Lorem Provider
-----------------------------------
| [
{
"components": [
{
"doc": "",
"lines": [
498,
552
],
"name": "DynamicProvider",
"signature": "class DynamicProvider(BaseProvider):",
"type": "class"
},
{
"doc": "A faker Provider capable of getting a list of elements ... | [
"tests/providers/test_dynamic.py::TestDynamicProvider::test_without_dynamic",
"tests/providers/test_dynamic.py::TestDynamicProvider::test_with_dynamic",
"tests/providers/test_dynamic.py::TestDynamicProvider::test_dynamic_with_special_provider_name",
"tests/providers/test_dynamic.py::TestDynamicProvider::test_... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Dynamic provider
### What does this change do
This change introduces a dynamic provider which allows users to create providers on the fly given a set of values. These values are passed to the init function and not hardcoded, therefore they could be used if the user has a list of elements coming from files or other sources of data.
Example code:
```python
from faker import Faker
from faker.providers import DynamicProvider
medical_professions_provider = DynamicProvider(
provider_name="medical_profession",
elements=["dr.", "doctor", "nurse", "surgeon", "clerk"], #could also be loaded from file
)
fake = Faker()
fake.add_provider(medical_professions_provider)
fake.medical_profession()
```
Output:
```
"dr."
```
This change will allow users to customize the elements Faker samples from, which is especially useful when generating synthetic data for ML model training, where the number of elements could be very large, or is gathered from some existing data source.
If you think this PR is valuable, I would be happy to add documentation as well.
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/__init__.py]
(definition of DynamicProvider:)
class DynamicProvider(BaseProvider):
(definition of DynamicProvider.__init__:)
def __init__( self, provider_name: str, elements: Optional[List] = None, generator: Optional[Any] = None, ):
"""A faker Provider capable of getting a list of elements to randomly select from,
instead of using the predefined list of elements which exist in the default providers in faker.
:param provider_name: Name of provider, which would translate into the function name e.g. faker.my_fun().
:param elements: List of values to randomly select from
:param generator: Generator object. If missing, the default Generator is used.
:example:
>>>from faker import Faker
>>>from faker.providers import DynamicProvider
>>>medical_professions_provider = DynamicProvider(
>>> provider_name="medical_profession",
>>> elements=["dr.", "doctor", "nurse", "surgeon", "clerk"],
>>>)
>>>fake = Faker()
>>>fake.add_provider(medical_professions_provider)
>>>fake.medical_profession()
"dr.""""
(definition of DynamicProvider.add_element:)
def add_element(self, element: str) -> None:
"""Add new element."""
(definition of DynamicProvider.get_random_value:)
def get_random_value(self):
[end of new definitions in faker/providers/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | |
joke2k__faker-1541 | 1,541 | joke2k/faker | null | 3818045332f4cb2911e5ac18f69e385bf0c51af0 | 2021-10-09T21:39:08Z | diff --git a/faker/providers/company/el_GR/__init__.py b/faker/providers/company/el_GR/__init__.py
new file mode 100644
index 0000000000..f8f323872e
--- /dev/null
+++ b/faker/providers/company/el_GR/__init__.py
@@ -0,0 +1,11 @@
+from .. import Provider as CompanyProvider
+
+
+class Provider(CompanyProvider):
+ formats = (
+ '{{last_name}} {{company_suffix}}',
+ '{{last_name}}-{{last_name}}',
+ '{{last_name}}-{{last_name}} {{company_suffix}}',
+ '{{last_name}}, {{last_name}} και {{last_name}}',
+ )
+ company_suffixes = ('Α.Ε.', 'και υιοί', 'Ο.Ε.', 'Α.Β.Ε.Ε.', 'Α.Ε. ΟΜΙΛΟΣ ΕΤΑΙΡΕΙΩΝ')
| diff --git a/tests/providers/test_company.py b/tests/providers/test_company.py
index 6fb49f72d5..0c1df1046f 100644
--- a/tests/providers/test_company.py
+++ b/tests/providers/test_company.py
@@ -5,6 +5,7 @@
import pytest
+from faker.providers.company.el_GR import Provider as ElGrCompanyProvider
from faker.providers.company.en_PH import Provider as EnPhCompanyProvider
from faker.providers.company.fil_PH import Provider as FilPhCompanyProvider
from faker.providers.company.hu_HU import Provider as HuHuCompanyProvider
@@ -421,3 +422,13 @@ def test_company_suffix(self, faker, num_samples):
suffix = faker.company_suffix()
assert isinstance(suffix, str)
assert suffix in RoRoCompanyProvider.company_suffixes
+
+
+class TestElGr:
+ """Test el_GR company provider methods"""
+
+ def test_company_suffix(self, faker, num_samples):
+ for _ in range(num_samples):
+ suffix = faker.company_suffix()
+ assert isinstance(suffix, str)
+ assert suffix in ElGrCompanyProvider.company_suffixes
| [
{
"components": [
{
"doc": "",
"lines": [
4,
11
],
"name": "Provider",
"signature": "class Provider(CompanyProvider):",
"type": "class"
}
],
"file": "faker/providers/company/el_GR/__init__.py"
}
] | [
"tests/providers/test_company.py::TestFiFi::test_company_business_id",
"tests/providers/test_company.py::TestHyAm::test_bs",
"tests/providers/test_company.py::TestHyAm::test_catch_phrase",
"tests/providers/test_company.py::TestHyAm::test_company",
"tests/providers/test_company.py::TestHyAm::test_company_suf... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `company` provider for `el_GR`
### What does this changes
Add `company` provider for `el_GR`
### What was wrong
Provider was missing
### How this fixes it
Add `company` provider for `el_GR`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/company/el_GR/__init__.py]
(definition of Provider:)
class Provider(CompanyProvider):
[end of new definitions in faker/providers/company/el_GR/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 | ||
joke2k__faker-1540 | 1,540 | joke2k/faker | null | 3818045332f4cb2911e5ac18f69e385bf0c51af0 | 2021-10-09T21:38:31Z | diff --git a/faker/providers/bank/el_GR/__init__.py b/faker/providers/bank/el_GR/__init__.py
new file mode 100644
index 0000000000..1d34c35cc3
--- /dev/null
+++ b/faker/providers/bank/el_GR/__init__.py
@@ -0,0 +1,8 @@
+from .. import Provider as BankProvider
+
+
+class Provider(BankProvider):
+ """Implement bank provider for ``el_GR`` locale."""
+
+ bban_format = '#######################'
+ country_code = 'GR'
| diff --git a/tests/providers/test_bank.py b/tests/providers/test_bank.py
index cd6760ae29..df4629d65f 100644
--- a/tests/providers/test_bank.py
+++ b/tests/providers/test_bank.py
@@ -4,6 +4,7 @@
from faker.providers.bank import Provider as BankProvider
from faker.providers.bank.de_CH import Provider as DeChBankProvider
+from faker.providers.bank.el_GR import Provider as ElGrBankProvider
from faker.providers.bank.en_GB import Provider as EnGbBankProvider
from faker.providers.bank.en_IE import Provider as EnIeBankProvider
from faker.providers.bank.en_PH import Provider as EnPhBankProvider
@@ -278,3 +279,17 @@ def test_iban(self, faker, num_samples):
assert is_valid_iban(iban)
assert iban[:2] == ThThBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{10}", iban[2:])
+
+
+class TestElGr:
+ """Test el_GR bank provider"""
+ def test_bban(self, faker, num_samples):
+ for _ in range(num_samples):
+ assert re.fullmatch(r"\d{23}", faker.bban())
+
+ def test_iban(self, faker, num_samples):
+ for _ in range(num_samples):
+ iban = faker.iban()
+ assert is_valid_iban(iban)
+ assert iban[:2] == ElGrBankProvider.country_code
+ assert re.fullmatch(r"\d{2}\d{23}", iban[2:])
| [
{
"components": [
{
"doc": "Implement bank provider for ``el_GR`` locale.",
"lines": [
4,
8
],
"name": "Provider",
"signature": "class Provider(BankProvider):",
"type": "class"
}
],
"file": "faker/providers/bank/el_GR/__init... | [
"tests/providers/test_bank.py::TestNoNo::test_aba",
"tests/providers/test_bank.py::TestNoNo::test_bban",
"tests/providers/test_bank.py::TestNoNo::test_iban",
"tests/providers/test_bank.py::TestFiFi::test_bban",
"tests/providers/test_bank.py::TestFiFi::test_iban",
"tests/providers/test_bank.py::TestPlPl::t... | [] | This is a feature request which requires a new feature to add in the code repository.
<<NEW FEATURE REQUEST>>
<request>
Add `bank` provider for `el_GR`
### What does this changes
Add `bank` provider for `el_GR`
### What was wrong
Provider was missing
### How this fixes it
Add `bank` provider for `el_GR`
----------
</request>
There are several new functions or classes that need to be implemented, using the definitions below:
<<NEW DEFINITIONS>>
There are several new functions or classes that need to be implemented, using the definitions below:
<definitions>
[start of new definitions in faker/providers/bank/el_GR/__init__.py]
(definition of Provider:)
class Provider(BankProvider):
"""Implement bank provider for ``el_GR`` locale."""
[end of new definitions in faker/providers/bank/el_GR/__init__.py]
</definitions>
Please note that in addition to the newly added components mentioned above, you also need to make other code changes to ensure that the new feature can be executed properly.
<<END>> | 6edfdbf6ae90b0153309e3bf066aa3b2d16494a7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.