id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
171,762 | import os
from typing import Optional
from ._password_hasher import (
DEFAULT_HASH_LENGTH,
DEFAULT_MEMORY_COST,
DEFAULT_PARALLELISM,
DEFAULT_RANDOM_SALT_LENGTH,
DEFAULT_TIME_COST,
)
from ._typing import Literal
from .low_level import Type, hash_secret, hash_secret_raw, verify_secret
class Type(Enum):
    """
    Enum of Argon2 variants.

    Member values are the raw variant constants exposed by the CFFI binding
    (``lib.Argon2_d`` / ``lib.Argon2_i`` / ``lib.Argon2_id``), so ``.value``
    can be passed straight to the low-level functions.

    Please see :doc:`parameters` on how to pick one.
    """

    D = lib.Argon2_d
    r"""
    Argon2\ **d** is faster and uses data-depending memory access, which makes
    it less suitable for hashing secrets and more suitable for cryptocurrencies
    and applications with no threats from side-channel timing attacks.
    """

    I = lib.Argon2_i
    r"""
    Argon2\ **i** uses data-independent memory access. Argon2i is slower as
    it makes more passes over the memory to protect from tradeoff attacks.
    """

    ID = lib.Argon2_id
    r"""
    Argon2\ **id** is a hybrid of Argon2i and Argon2d, using a combination of
    data-depending and data-independent memory accesses, which gives some of
    Argon2i's resistance to side-channel cache timing attacks and much of
    Argon2d's resistance to GPU cracking attacks.

    That makes it the preferred type for password hashing and password-based
    key derivation.

    .. versionadded:: 16.3.0
    """
def verify_secret(hash: bytes, secret: bytes, type: Type) -> Literal[True]:
    """
    Verify whether *secret* is correct for *hash* of *type*.

    :param bytes hash: An encoded Argon2 hash as returned by
        :func:`hash_secret`.
    :param bytes secret: The secret to verify whether it matches the one
        in *hash*.
    :param Type type: Type for *hash*.

    :raises argon2.exceptions.VerifyMismatchError: If verification fails
        because *hash* is not valid for *secret* of *type*.
    :raises argon2.exceptions.VerificationError: If verification fails for
        other reasons.

    :return: ``True`` on success, raise
        :exc:`~argon2.exceptions.VerificationError` otherwise.
    :rtype: bool

    .. versionadded:: 16.0.0
    .. versionchanged:: 16.1.0
        Raise :exc:`~argon2.exceptions.VerifyMismatchError` on mismatches
        instead of its more generic superclass.
    """
    # Hand the encoded hash and the candidate secret to the C library and
    # map the returned status code onto the Python exception hierarchy.
    status = lib.argon2_verify(
        ffi.new("char[]", hash),
        ffi.new("uint8_t[]", secret),
        len(secret),
        type.value,
    )
    if status == lib.ARGON2_OK:
        return True

    # Only translate the error code into a message on the failure paths.
    message = error_to_str(status)
    if status == lib.ARGON2_VERIFY_MISMATCH:
        raise VerifyMismatchError(message)
    raise VerificationError(message)
The provided code snippet includes necessary dependencies for implementing the `verify_password` function. Write a Python function `def verify_password( hash: bytes, password: bytes, type: Type = Type.I ) -> Literal[True]` to solve the following problem:
Legacy alias for :func:`verify_secret` with default parameters. .. deprecated:: 16.0.0 Use :class:`argon2.PasswordHasher` for passwords.
Here is the function:
def verify_password(
    hash: bytes, password: bytes, type: Type = Type.I
) -> Literal[True]:
    """
    Legacy alias for :func:`verify_secret` with default parameters.

    :param bytes hash: An encoded Argon2 hash.
    :param bytes password: The password to verify against *hash*.
    :param Type type: Argon2 variant of *hash*; defaults to :attr:`Type.I`.

    .. deprecated:: 16.0.0
        Use :class:`argon2.PasswordHasher` for passwords.
    """
    # Thin wrapper: all real work (and all error raising) happens in
    # verify_secret().
    return verify_secret(hash, password, type)
171,763 | from enum import Enum
from typing import Any
from _argon2_cffi_bindings import ffi, lib
from ._typing import Literal
from .exceptions import HashingError, VerificationError, VerifyMismatchError
Any = object()
The provided code snippet includes necessary dependencies for implementing the `core` function. Write a Python function `def core(context: Any, type: int) -> int` to solve the following problem:
Direct binding to the ``argon2_ctx`` function. .. warning:: This is a strictly advanced function working on raw C data structures. Both *Argon2*'s and *argon2-cffi*'s higher-level bindings do a lot of sanity checks and housekeeping work that *you* are now responsible for (e.g. clearing buffers). The structure of the *context* object can, has, and will change with *any* release! Use at your own peril; *argon2-cffi* does *not* use this binding itself. :param context: A CFFI *Argon2* context object (i.e. an ``struct Argon2_Context``/``argon2_context``). :param int type: Which *Argon2* variant to use. You can use the ``value`` field of :class:`Type`'s fields. :rtype: int :return: An *Argon2* error code. Can be transformed into a string using :func:`error_to_str`. .. versionadded:: 16.0.0
Here is the function:
def core(context: Any, type: int) -> int:
    """
    Direct binding to the ``argon2_ctx`` function.

    .. warning::
        This is a strictly advanced function working on raw C data structures.
        Both *Argon2*'s and *argon2-cffi*'s higher-level bindings do a lot of
        sanity checks and housekeeping work that *you* are now responsible for
        (e.g. clearing buffers). The structure of the *context* object can,
        has, and will change with *any* release!

        Use at your own peril; *argon2-cffi* does *not* use this binding
        itself.

    :param context: A CFFI *Argon2* context object (i.e. an ``struct
        Argon2_Context``/``argon2_context``).
    :param int type: Which *Argon2* variant to use. You can use the ``value``
        field of :class:`Type`'s fields.

    :rtype: int
    :return: An *Argon2* error code. Can be transformed into a string using
        :func:`error_to_str`.

    .. versionadded:: 16.0.0
    """
    # No validation or buffer housekeeping here -- the caller owns the
    # context and its lifetime.
    return lib.argon2_ctx(context, type)
171,764 | from __future__ import annotations
from collections.abc import Iterable
import string
from types import MappingProxyType
from typing import Any, BinaryIO, NamedTuple
from ._re import (
RE_DATETIME,
RE_LOCALTIME,
RE_NUMBER,
match_to_datetime,
match_to_localtime,
match_to_number,
)
from ._types import Key, ParseFloat, Pos
def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]:  # noqa: C901
    """Parse TOML from a string.

    :param __s: The TOML document as a string.
    :param parse_float: Callable used to construct TOML float values; it is
        wrapped by ``make_safe_parse_float`` before use.
    :return: The parsed document as a plain ``dict``.
    """
    # The spec allows converting "\r\n" to "\n", even in string
    # literals. Let's do so to simplify parsing.
    src = __s.replace("\r\n", "\n")
    pos = 0
    out = Output(NestedDict(), Flags())
    header: Key = ()
    parse_float = make_safe_parse_float(parse_float)

    # Parse one statement at a time
    # (typically means one line in TOML source)
    while True:
        # 1. Skip line leading whitespace
        pos = skip_chars(src, pos, TOML_WS)

        # 2. Parse rules. Expect one of the following:
        #    - end of file
        #    - end of line
        #    - comment
        #    - key/value pair
        #    - append dict to list (and move to its namespace)
        #    - create dict (and move to its namespace)
        # Skip trailing whitespace when applicable.
        try:
            char = src[pos]
        except IndexError:
            break
        if char == "\n":
            pos += 1
            continue
        if char in KEY_INITIAL_CHARS:
            pos = key_value_rule(src, pos, out, header, parse_float)
            pos = skip_chars(src, pos, TOML_WS)
        elif char == "[":
            # Peek one character ahead to distinguish "[[" (array of
            # tables) from a plain "[" (table).
            try:
                second_char: str | None = src[pos + 1]
            except IndexError:
                second_char = None
            out.flags.finalize_pending()
            if second_char == "[":
                pos, header = create_list_rule(src, pos, out)
            else:
                pos, header = create_dict_rule(src, pos, out)
            pos = skip_chars(src, pos, TOML_WS)
        elif char != "#":
            raise suffixed_err(src, pos, "Invalid statement")

        # 3. Skip comment
        pos = skip_comment(src, pos)

        # 4. Expect end of line or end of file
        try:
            char = src[pos]
        except IndexError:
            break
        if char != "\n":
            raise suffixed_err(
                src, pos, "Expected newline or end of document after a statement"
            )
        pos += 1

    return out.data.dict
Any = object()
class BinaryIO(IO[bytes]):
def __enter__(self) -> BinaryIO: ...
ParseFloat = Callable[[str], Any]
The provided code snippet includes necessary dependencies for implementing the `load` function. Write a Python function `def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]` to solve the following problem:
Parse TOML from a binary file object.
Here is the function:
def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
    """Parse TOML from a binary file object."""
    raw = __fp.read()
    try:
        # A text-mode file yields str, which has no decode() -- catch the
        # AttributeError and turn it into a helpful TypeError.
        text = raw.decode()
    except AttributeError:
        msg = "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
        raise TypeError(msg) from None
    return loads(text, parse_float=parse_float)
171,765 | from __future__ import annotations
import math
def two_factors(n: int) -> tuple[int, int]:
    """Split a positive integer into two integer factors.

    The two factors will be as close as possible to the sqrt of n, and are
    returned in decreasing order.  Worst case (n prime) returns (n, 1).

    Args:
        n (int): The positive integer to factorize.

    Return:
        tuple(int, int): The two factors of n, in decreasing order.

    Raises:
        ValueError: If n is not a positive integer.
    """
    if n < 1:
        raise ValueError(f"n must be a positive integer, not {n}")
    # math.isqrt is exact for arbitrarily large ints, unlike
    # math.ceil(math.sqrt(n)) which can be wrong for very large n due to
    # float rounding.  Starting the downward search at floor(sqrt(n)) finds
    # the largest divisor <= sqrt(n); its cofactor is then >= it.
    small = math.isqrt(n)
    while n % small != 0:
        small -= 1
    large = n // small
    return large, small
The provided code snippet includes necessary dependencies for implementing the `calc_chunk_sizes` function. Write a Python function `def calc_chunk_sizes( chunk_size: int | tuple[int, int] | None, chunk_count: int | tuple[int, int] | None, total_chunk_count: int | None, ny: int, nx: int, ) -> tuple[int, int]` to solve the following problem:
Calculate chunk sizes. Args: chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same size in both directions if only one is specified. chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the same count in both irections if only one is specified. total_chunk_count (int, optional): Total number of chunks. ny (int): Number of grid points in y-direction. nx (int): Number of grid points in x-direction. Return: tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size). Note: A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be specified.
Here is the function:
def calc_chunk_sizes(
    chunk_size: int | tuple[int, int] | None,
    chunk_count: int | tuple[int, int] | None,
    total_chunk_count: int | None,
    ny: int,
    nx: int,
) -> tuple[int, int]:
    """Calculate chunk sizes.

    Args:
        chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same
            size in both directions if only one is specified.
        chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the
            same count in both directions if only one is specified.
        total_chunk_count (int, optional): Total number of chunks.
        ny (int): Number of grid points in y-direction.
        nx (int): Number of grid points in x-direction.

    Return:
        tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size).  A size of
        0 means no chunking in that direction.

    Note:
        A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be
        specified.
    """
    if sum([chunk_size is not None, chunk_count is not None, total_chunk_count is not None]) > 1:
        raise ValueError("Only one of chunk_size, chunk_count and total_chunk_count should be set")

    if total_chunk_count is not None:
        # The grid has (nx-1)*(ny-1) quads, which bounds the chunk count.
        max_chunk_count = (nx-1)*(ny-1)
        total_chunk_count = min(max(total_chunk_count, 1), max_chunk_count)
        if total_chunk_count == 1:
            chunk_size = 0  # A single chunk, i.e. no chunking.
        elif total_chunk_count == max_chunk_count:
            chunk_size = (1, 1)
        else:
            # Split the total into two factors near its sqrt, assigning the
            # larger factor to the longer grid direction; fall through to the
            # chunk_count branch below to convert counts into sizes.
            factors = two_factors(total_chunk_count)
            if ny > nx:
                chunk_count = factors
            else:
                chunk_count = (factors[1], factors[0])

    if chunk_count is not None:
        if isinstance(chunk_count, tuple):
            y_chunk_count, x_chunk_count = chunk_count
        else:
            y_chunk_count = x_chunk_count = chunk_count
        # Clamp counts into the valid range before converting to sizes.
        x_chunk_count = min(max(x_chunk_count, 1), nx-1)
        y_chunk_count = min(max(y_chunk_count, 1), ny-1)
        chunk_size = (math.ceil((ny-1) / y_chunk_count), math.ceil((nx-1) / x_chunk_count))

    if chunk_size is None:
        y_chunk_size = x_chunk_size = 0
    elif isinstance(chunk_size, tuple):
        y_chunk_size, x_chunk_size = chunk_size
    else:
        y_chunk_size = x_chunk_size = chunk_size

    if x_chunk_size < 0 or y_chunk_size < 0:
        raise ValueError("chunk_size cannot be negative")

    return y_chunk_size, x_chunk_size
171,766 | from __future__ import annotations
from contourpy._contourpy import FillType, LineType, ZInterp
The provided code snippet includes necessary dependencies for implementing the `as_fill_type` function. Write a Python function `def as_fill_type(fill_type: FillType | str) -> FillType` to solve the following problem:
Coerce a FillType or string value to a FillType. Args: fill_type (FillType or str): Value to convert. Return: FillType: Converted value.
Here is the function:
def as_fill_type(fill_type: FillType | str) -> FillType:
    """Coerce a FillType or string value to a FillType.

    Args:
        fill_type (FillType or str): Value to convert.

    Return:
        FillType: Converted value.
    """
    # Strings are looked up by enum member name; anything else is assumed to
    # already be a FillType and is passed through unchanged.
    if not isinstance(fill_type, str):
        return fill_type
    return FillType.__members__[fill_type]
171,767 | from __future__ import annotations
from contourpy._contourpy import FillType, LineType, ZInterp
The provided code snippet includes necessary dependencies for implementing the `as_line_type` function. Write a Python function `def as_line_type(line_type: LineType | str) -> LineType` to solve the following problem:
Coerce a LineType or string value to a LineType. Args: line_type (LineType or str): Value to convert. Return: LineType: Converted value.
Here is the function:
def as_line_type(line_type: LineType | str) -> LineType:
    """Coerce a LineType or string value to a LineType.

    Args:
        line_type (LineType or str): Value to convert.

    Return:
        LineType: Converted value.
    """
    # A string names an enum member; non-strings pass through unchanged.
    if isinstance(line_type, str):
        line_type = LineType.__members__[line_type]
    return line_type
171,768 | from __future__ import annotations
from contourpy._contourpy import FillType, LineType, ZInterp
The provided code snippet includes necessary dependencies for implementing the `as_z_interp` function. Write a Python function `def as_z_interp(z_interp: ZInterp | str) -> ZInterp` to solve the following problem:
Coerce a ZInterp or string value to a ZInterp. Args: z_interp (ZInterp or str): Value to convert. Return: ZInterp: Converted value.
Here is the function:
def as_z_interp(z_interp: ZInterp | str) -> ZInterp:
    """Coerce a ZInterp or string value to a ZInterp.

    Args:
        z_interp (ZInterp or str): Value to convert.

    Return:
        ZInterp: Converted value.
    """
    # A string names an enum member; non-strings pass through unchanged.
    return ZInterp.__members__[z_interp] if isinstance(z_interp, str) else z_interp
171,769 | from __future__ import annotations
from typing import TYPE_CHECKING, Any
import numpy as np
Any = object()
try:
import numpy
except ImportError:
pass
The provided code snippet includes necessary dependencies for implementing the `simple` function. Write a Python function `def simple( shape: tuple[int, int], want_mask: bool = False ) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]` to solve the following problem:
Return simple test data consisting of the sum of two gaussians. Args: shape (tuple(int, int)): 2D shape of data to return. want_mask (bool, optional): Whether test data should be masked or not, default ``False``. Return: Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if ``want_mask=True``.
Here is the function:
def simple(
    shape: tuple[int, int], want_mask: bool = False
) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]:
    """Return simple test data consisting of the sum of two gaussians.

    Args:
        shape (tuple(int, int)): 2D shape of data to return.
        want_mask (bool, optional): Whether test data should be masked or not, default ``False``.

    Return:
        Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if
        ``want_mask=True``.
    """
    ny, nx = shape
    x, y = np.meshgrid(
        np.arange(nx, dtype=np.float64), np.arange(ny, dtype=np.float64)
    )
    xscale = nx - 1.0
    yscale = ny - 1.0

    # z is the sum of 2D gaussian bumps with these amplitudes, centres
    # (in unit [0, 1] coordinates) and widths.
    amps = np.asarray([1.0, -1.0, 0.8, -0.9, 0.7])
    centres = np.asarray([[0.4, 0.2], [0.3, 0.8], [0.9, 0.75], [0.7, 0.3], [0.05, 0.7]])
    widths = np.asarray([0.4, 0.2, 0.2, 0.2, 0.1])

    z = np.zeros_like(x)
    for amplitude, (cx, cy), width in zip(amps, centres, widths):
        z += amplitude*np.exp(-((x/xscale - cx)**2 + (y/yscale - cy)**2) / width**2)

    if want_mask:
        # Mask out two elliptical regions of the domain.
        mask = np.logical_or(
            ((x/xscale - 1.0)**2 / 0.2 + (y/yscale - 0.0)**2 / 0.1) < 1.0,
            ((x/xscale - 0.2)**2 / 0.02 + (y/yscale - 0.45)**2 / 0.08) < 1.0
        )
        z = np.ma.array(z, mask=mask)  # type: ignore[no-untyped-call]
    return x, y, z
171,770 | from __future__ import annotations
from typing import TYPE_CHECKING, Any
import numpy as np
Any = object()
try:
import numpy
except ImportError:
pass
The provided code snippet includes necessary dependencies for implementing the `random` function. Write a Python function `def random( shape: tuple[int, int], seed: int = 2187, mask_fraction: float = 0.0 ) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]` to solve the following problem:
Return random test data.. Args: shape (tuple(int, int)): 2D shape of data to return. seed (int, optional): Seed for random number generator, default 2187. mask_fraction (float, optional): Fraction of elements to mask, default 0. Return: Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if ``mask_fraction`` is greater than zero.
Here is the function:
def random(
    shape: tuple[int, int], seed: int = 2187, mask_fraction: float = 0.0
) -> tuple[CoordinateArray, CoordinateArray, CoordinateArray | np.ma.MaskedArray[Any, Any]]:
    """Return random test data.

    Args:
        shape (tuple(int, int)): 2D shape of data to return.
        seed (int, optional): Seed for random number generator, default 2187.
        mask_fraction (float, optional): Fraction of elements to mask, default 0.

    Return:
        Tuple of 3 arrays: ``x``, ``y``, ``z`` test data, ``z`` will be masked if
        ``mask_fraction`` is greater than zero.
    """
    ny, nx = shape
    x, y = np.meshgrid(
        np.arange(nx, dtype=np.float64), np.arange(ny, dtype=np.float64)
    )

    # z is drawn first so its values are reproducible for a given seed,
    # whether or not a mask is drawn afterwards.
    rng = np.random.default_rng(seed)
    z = rng.uniform(size=shape)

    if mask_fraction > 0.0:
        # Cap the fraction so that some values always remain unmasked.
        mask_fraction = min(mask_fraction, 0.99)
        mask = rng.uniform(size=shape) < mask_fraction
        z = np.ma.array(z, mask=mask)  # type: ignore[no-untyped-call]
    return x, y, z
171,771 | from __future__ import annotations
from typing import TYPE_CHECKING, cast
import matplotlib.path as mpath
import numpy as np
from contourpy import FillType, LineType
def offsets_to_mpl_codes(offsets: OffsetArray) -> CodeArray:
    """Convert sub-path offsets into a Matplotlib Path code array.

    Every point defaults to LINETO; the first point of each sub-path becomes
    MOVETO and the last becomes CLOSEPOLY (the CLOSEPOLY assignment runs last,
    so it wins for single-point sub-paths).
    """
    n_points = int(offsets[-1] - offsets[0])
    codes = np.empty(n_points, dtype=np.uint8)
    codes.fill(2)                # LINETO = 2
    codes[offsets[:-1]] = 1      # MOVETO = 1
    codes[offsets[1:] - 1] = 79  # CLOSEPOLY = 79
    return codes
try:
import numpy
except ImportError:
pass
def filled_to_mpl_paths(filled: FillReturn, fill_type: FillType) -> list[mpath.Path]:
    """Convert filled contour data to a list of Matplotlib Paths.

    Args:
        filled: Filled contour data laid out as implied by *fill_type*.
        fill_type (FillType): Layout of *filled*.

    Return:
        list[matplotlib.path.Path]: One Path per points/codes group; ``None``
        placeholder chunks are skipped.

    Raises:
        RuntimeError: If *fill_type* is not one of the handled layouts.
    """
    if fill_type in (FillType.OuterCode, FillType.ChunkCombinedCode):
        paths = [mpath.Path(points, codes) for points, codes in zip(*filled) if points is not None]
    elif fill_type in (FillType.OuterOffset, FillType.ChunkCombinedOffset):
        # Offset layouts are converted to MPL code arrays first.
        paths = [mpath.Path(points, offsets_to_mpl_codes(offsets))
                 for points, offsets in zip(*filled) if points is not None]
    elif fill_type == FillType.ChunkCombinedCodeOffset:
        paths = []
        for points, codes, outer_offsets in zip(*filled):
            if points is None:
                continue
            # outer_offsets delimits the per-boundary sections of the
            # combined points/codes arrays.
            points = np.split(points, outer_offsets[1:-1])
            codes = np.split(codes, outer_offsets[1:-1])
            paths += [mpath.Path(p, c) for p, c in zip(points, codes)]
    elif fill_type == FillType.ChunkCombinedOffsetOffset:
        paths = []
        for points, offsets, outer_offsets in zip(*filled):
            if points is None:
                continue
            for i in range(len(outer_offsets)-1):
                # Slice out the offsets for one outer boundary, then rebase
                # them to zero before converting to MPL codes.
                offs = offsets[outer_offsets[i]:outer_offsets[i+1]+1]
                pts = points[offs[0]:offs[-1]]
                paths += [mpath.Path(pts, offsets_to_mpl_codes(offs - offs[0]))]
    else:
        raise RuntimeError(f"Conversion of FillType {fill_type} to MPL Paths is not implemented")
    return paths
171,772 | from __future__ import annotations
from typing import TYPE_CHECKING, cast
import matplotlib.path as mpath
import numpy as np
from contourpy import FillType, LineType
if TYPE_CHECKING:
from contourpy._contourpy import (
CodeArray, FillReturn, LineReturn, LineReturn_Separate, OffsetArray,
)
TYPE_CHECKING = True
def cast(typ: Type[_T], val: Any) -> _T: ...
def cast(typ: str, val: Any) -> Any: ...
def cast(typ: object, val: Any) -> Any: ...
def lines_to_mpl_paths(lines: LineReturn, line_type: LineType) -> list[mpath.Path]:
    """Convert contour line data to a list of Matplotlib Paths.

    Args:
        lines: Contour line data laid out as implied by *line_type*.
        line_type (LineType): Layout of *lines*.

    Return:
        list[matplotlib.path.Path]: One Path per line; ``None`` placeholder
        chunks are skipped.

    Raises:
        RuntimeError: If *line_type* is not one of the handled layouts.
    """
    if line_type == LineType.Separate:
        if TYPE_CHECKING:
            lines = cast(LineReturn_Separate, lines)
        paths = []
        for line in lines:
            # Drawing as Paths so that they can be closed correctly.
            closed = line[0, 0] == line[-1, 0] and line[0, 1] == line[-1, 1]
            paths.append(mpath.Path(line, closed=closed))
    elif line_type in (LineType.SeparateCode, LineType.ChunkCombinedCode):
        paths = [mpath.Path(points, codes) for points, codes in zip(*lines) if points is not None]
    elif line_type == LineType.ChunkCombinedOffset:
        paths = []
        for points, offsets in zip(*lines):
            if points is None:
                continue
            for i in range(len(offsets)-1):
                line = points[offsets[i]:offsets[i+1]]
                # A line is closed iff its first and last points coincide.
                closed = line[0, 0] == line[-1, 0] and line[0, 1] == line[-1, 1]
                paths.append(mpath.Path(line, closed=closed))
    else:
        raise RuntimeError(f"Conversion of LineType {line_type} to MPL Paths is not implemented")
    return paths
171,773 | from __future__ import annotations
from typing import TYPE_CHECKING, cast
from contourpy import FillType, LineType
from contourpy.util.mpl_util import mpl_codes_to_offsets
def mpl_codes_to_offsets(codes: CodeArray) -> OffsetArray:
    """Convert a Matplotlib Path code array into sub-path start offsets.

    Returns the indices of every MOVETO code (value 1) with the total length
    appended, so each consecutive pair of offsets brackets one sub-path.
    """
    starts = np.flatnonzero(codes == 1).astype(np.uint32)
    return np.append(starts, len(codes))
def filled_to_bokeh(
    filled: FillReturn,
    fill_type: FillType,
) -> tuple[list[list[CoordinateArray]], list[list[CoordinateArray]]]:
    """Convert filled contour data to x and y coordinate lists for Bokeh.

    Args:
        filled: Filled contour data laid out as implied by *fill_type*.
        fill_type (FillType): Layout of *filled*.

    Return:
        Tuple of two nested lists (x coordinates, y coordinates); each entry
        of the outer lists corresponds to one outer boundary together with
        its holes.

    Raises:
        RuntimeError: If *fill_type* is not one of the handled layouts.
    """
    xs: list[list[CoordinateArray]] = []
    ys: list[list[CoordinateArray]] = []
    if fill_type in (FillType.OuterOffset, FillType.ChunkCombinedOffset,
                     FillType.OuterCode, FillType.ChunkCombinedCode):
        have_codes = fill_type in (FillType.OuterCode, FillType.ChunkCombinedCode)

        for points, offsets in zip(*filled):
            if points is None:
                continue
            if have_codes:
                # Code layouts are normalized to offsets first.
                offsets = mpl_codes_to_offsets(offsets)
            xs.append([])  # New outer with zero or more holes.
            ys.append([])
            for i in range(len(offsets)-1):
                xys = points[offsets[i]:offsets[i+1]]
                xs[-1].append(xys[:, 0])
                ys[-1].append(xys[:, 1])
    elif fill_type in (FillType.ChunkCombinedCodeOffset, FillType.ChunkCombinedOffsetOffset):
        for points, codes_or_offsets, outer_offsets in zip(*filled):
            if points is None:
                continue
            for j in range(len(outer_offsets)-1):
                if fill_type == FillType.ChunkCombinedCodeOffset:
                    # Slice out this outer boundary's codes and rebase the
                    # resulting offsets into the combined points array.
                    codes = codes_or_offsets[outer_offsets[j]:outer_offsets[j+1]]
                    offsets = mpl_codes_to_offsets(codes) + outer_offsets[j]
                else:
                    offsets = codes_or_offsets[outer_offsets[j]:outer_offsets[j+1]+1]
                xs.append([])  # New outer with zero or more holes.
                ys.append([])
                for k in range(len(offsets)-1):
                    xys = points[offsets[k]:offsets[k+1]]
                    xs[-1].append(xys[:, 0])
                    ys[-1].append(xys[:, 1])
    else:
        raise RuntimeError(f"Conversion of FillType {fill_type} to Bokeh is not implemented")
    return xs, ys
171,774 | from __future__ import annotations
from typing import TYPE_CHECKING, cast
from contourpy import FillType, LineType
from contourpy.util.mpl_util import mpl_codes_to_offsets
if TYPE_CHECKING:
from contourpy._contourpy import (
CoordinateArray, FillReturn, LineReturn, LineReturn_Separate, LineReturn_SeparateCode,
)
TYPE_CHECKING = True
def cast(typ: Type[_T], val: Any) -> _T: ...
def cast(typ: str, val: Any) -> Any: ...
def cast(typ: object, val: Any) -> Any: ...
def mpl_codes_to_offsets(codes: CodeArray) -> OffsetArray:
    """Convert Matplotlib Path codes to sub-path start offsets.

    Offsets are the indices of the MOVETO codes (value 1) with ``len(codes)``
    appended, so each consecutive pair of offsets brackets one sub-path.
    """
    offsets = np.nonzero(codes == 1)[0].astype(np.uint32)
    offsets = np.append(offsets, len(codes))
    return offsets
def lines_to_bokeh(
    lines: LineReturn,
    line_type: LineType,
) -> tuple[list[CoordinateArray], list[CoordinateArray]]:
    """Convert contour line data to x and y coordinate lists for Bokeh.

    Args:
        lines: Contour line data laid out as implied by *line_type*.
        line_type (LineType): Layout of *lines*.

    Return:
        Tuple of two lists (x coordinates, y coordinates), one entry per
        line; ``None`` placeholder chunks are skipped.

    Raises:
        RuntimeError: If *line_type* is not one of the handled layouts.
    """
    xs: list[CoordinateArray] = []
    ys: list[CoordinateArray] = []

    if line_type == LineType.Separate:
        if TYPE_CHECKING:
            lines = cast(LineReturn_Separate, lines)
        for line in lines:
            xs.append(line[:, 0])
            ys.append(line[:, 1])
    elif line_type == LineType.SeparateCode:
        if TYPE_CHECKING:
            lines = cast(LineReturn_SeparateCode, lines)
        # Codes (element 1) are not needed for Bokeh; only the point arrays.
        for line in lines[0]:
            xs.append(line[:, 0])
            ys.append(line[:, 1])
    elif line_type in (LineType.ChunkCombinedCode, LineType.ChunkCombinedOffset):
        for points, offsets in zip(*lines):
            if points is None:
                continue
            if line_type == LineType.ChunkCombinedCode:
                # Code layout is normalized to offsets first.
                offsets = mpl_codes_to_offsets(offsets)
            for i in range(len(offsets)-1):
                line = points[offsets[i]:offsets[i+1]]
                xs.append(line[:, 0])
                ys.append(line[:, 1])
    else:
        raise RuntimeError(f"Conversion of LineType {line_type} to Bokeh is not implemented")
    return xs, ys
171,775 | from numbers import Number
from functools import partial
import math
import textwrap
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as tx
from matplotlib.colors import to_rgba
from matplotlib.collections import LineCollection
from ._oldcore import (
VectorPlotter,
)
from ._statistics import ECDF, Histogram, KDE
from ._stats.counting import Hist
from .axisgrid import (
FacetGrid,
_facet_docs,
)
from .utils import (
remove_na,
_kde_support,
_normalize_kwargs,
_check_argument,
_assign_default_kwargs,
_default_color,
)
from .palettes import color_palette
from .external import husl
from .external.kde import gaussian_kde
from ._docstrings import (
DocstringComponents,
_core_docs,
)
class _DistributionFacetPlotter(_DistributionPlotter):
def histplot(
data=None, *,
# Vector variables
x=None, y=None, hue=None, weights=None,
# Histogram computation parameters
stat="count", bins="auto", binwidth=None, binrange=None,
discrete=None, cumulative=False, common_bins=True, common_norm=True,
# Histogram appearance parameters
multiple="layer", element="bars", fill=True, shrink=1,
# Histogram smoothing with a kernel density estimate
kde=False, kde_kws=None, line_kws=None,
# Bivariate histogram parameters
thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,
# Hue mapping parameters
palette=None, hue_order=None, hue_norm=None, color=None,
# Axes information
log_scale=None, legend=True, ax=None,
# Other appearance keywords
**kwargs,
):
histplot.__doc__ = """\
Plot univariate or bivariate histograms to show distributions of datasets.
A histogram is a classic visualization tool that represents the distribution
of one or more variables by counting the number of observations that fall within
discrete bins.
This function can normalize the statistic computed within each bin to estimate
frequency, density or probability mass, and it can add a smooth curve obtained
using a kernel density estimate, similar to :func:`kdeplot`.
More information is provided in the :ref:`user guide <tutorial_hist>`.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
If provided, weight the contribution of the corresponding data points
towards the count in each bin by these factors.
{params.hist.stat}
{params.hist.bins}
{params.hist.binwidth}
{params.hist.binrange}
discrete : bool
If True, default to ``binwidth=1`` and draw the bars so that they are
centered on their corresponding data points. This avoids "gaps" that may
otherwise appear when using discrete (integer) data.
cumulative : bool
If True, plot the cumulative counts as bins increase.
common_bins : bool
If True, use the same bins when semantic variables produce multiple
plots. If using a reference rule to determine the bins, it will be computed
with the full dataset.
common_norm : bool
If True and using a normalized statistic, the normalization will apply over
the full dataset. Otherwise, normalize each histogram independently.
multiple : {{"layer", "dodge", "stack", "fill"}}
Approach to resolving multiple elements when semantic mapping creates subsets.
Only relevant with univariate data.
element : {{"bars", "step", "poly"}}
Visual representation of the histogram statistic.
Only relevant with univariate data.
fill : bool
If True, fill in the space under the histogram.
Only relevant with univariate data.
shrink : number
Scale the width of each bar relative to the binwidth by this factor.
Only relevant with univariate data.
kde : bool
If True, compute a kernel density estimate to smooth the distribution
and show on the plot as (one or more) line(s).
Only relevant with univariate data.
kde_kws : dict
Parameters that control the KDE computation, as in :func:`kdeplot`.
line_kws : dict
Parameters that control the KDE visualization, passed to
:meth:`matplotlib.axes.Axes.plot`.
thresh : number or None
Cells with a statistic less than or equal to this value will be transparent.
Only relevant with bivariate data.
pthresh : number or None
Like ``thresh``, but a value in [0, 1] such that cells with aggregate counts
(or other statistics, when used) up to this proportion of the total will be
transparent.
pmax : number or None
A value in [0, 1] that sets that saturation point for the colormap at a value
such that cells below constitute this proportion of the total count (or
other statistic, when used).
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
Other keyword arguments are passed to one of the following matplotlib
functions:
- :meth:`matplotlib.axes.Axes.bar` (univariate, element="bars")
- :meth:`matplotlib.axes.Axes.fill_between` (univariate, other element, fill=True)
- :meth:`matplotlib.axes.Axes.plot` (univariate, other element, fill=False)
- :meth:`matplotlib.axes.Axes.pcolormesh` (bivariate)
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.kdeplot}
{seealso.rugplot}
{seealso.ecdfplot}
{seealso.jointplot}
Notes
-----
The choice of bins for computing and plotting a histogram can exert
substantial influence on the insights that one is able to draw from the
visualization. If the bins are too large, they may erase important features.
On the other hand, bins that are too small may be dominated by random
variability, obscuring the shape of the true underlying distribution. The
default bin size is determined using a reference rule that depends on the
sample size and variance. This works well in many cases, (i.e., with
"well-behaved" data) but it fails in others. It is always a good to try
different bin sizes to be sure that you are not missing something important.
This function allows you to specify bins in several different ways, such as
by setting the total number of bins to use, the width of each bin, or the
specific locations where the bins should break.
Examples
--------
.. include:: ../docstrings/histplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def kdeplot(
data=None, *, x=None, y=None, hue=None, weights=None,
palette=None, hue_order=None, hue_norm=None, color=None, fill=None,
multiple="layer", common_norm=True, common_grid=False, cumulative=False,
bw_method="scott", bw_adjust=1, warn_singular=True, log_scale=None,
levels=10, thresh=.05, gridsize=200, cut=3, clip=None,
legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,
**kwargs,
):
kdeplot.__doc__ = """\
Plot univariate or bivariate distributions using kernel density estimation.
A kernel density estimate (KDE) plot is a method for visualizing the
distribution of observations in a dataset, analogous to a histogram. KDE
represents the data using a continuous probability density curve in one or
more dimensions.
The approach is explained further in the :ref:`user guide <tutorial_kde>`.
Relative to a histogram, KDE can produce a plot that is less cluttered and
more interpretable, especially when drawing multiple distributions. But it
has the potential to introduce distortions if the underlying distribution is
bounded or not smooth. Like a histogram, the quality of the representation
also depends on the selection of good smoothing parameters.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
If provided, weight the kernel density estimation using these values.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
fill : bool or None
If True, fill in the area under univariate density curves or between
bivariate contours. If None, the default depends on ``multiple``.
{params.dist.multiple}
common_norm : bool
If True, scale each conditional density by the number of observations
such that the total area under all densities sums to 1. Otherwise,
normalize each density independently.
common_grid : bool
If True, use the same evaluation grid for each kernel density estimate.
Only relevant with univariate data.
{params.kde.cumulative}
{params.kde.bw_method}
{params.kde.bw_adjust}
warn_singular : bool
If True, issue a warning when trying to estimate the density of data
with zero variance.
{params.dist.log_scale}
levels : int or vector
Number of contour levels or values to draw contours at. A vector argument
must have increasing values in [0, 1]. Levels correspond to iso-proportions
of the density: e.g., 20% of the probability mass will lie below the
contour drawn for 0.2. Only relevant with bivariate data.
thresh : number in [0, 1]
Lowest iso-proportion level at which to draw a contour line. Ignored when
``levels`` is a vector. Only relevant with bivariate data.
gridsize : int
Number of points on each dimension of the evaluation grid.
{params.kde.cut}
{params.kde.clip}
{params.dist.legend}
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.ax}
kwargs
Other keyword arguments are passed to one of the following matplotlib
functions:
- :meth:`matplotlib.axes.Axes.plot` (univariate, ``fill=False``),
- :meth:`matplotlib.axes.Axes.fill_between` (univariate, ``fill=True``),
- :meth:`matplotlib.axes.Axes.contour` (bivariate, ``fill=False``),
- :meth:`matplotlib.axes.contourf` (bivariate, ``fill=True``).
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.ecdfplot}
{seealso.jointplot}
{seealso.violinplot}
Notes
-----
The *bandwidth*, or standard deviation of the smoothing kernel, is an
important parameter. Misspecification of the bandwidth can produce a
distorted representation of the data. Much like the choice of bin width in a
histogram, an over-smoothed curve can erase true features of a
distribution, while an under-smoothed curve can create false features out of
random variability. The rule-of-thumb that sets the default bandwidth works
best when the true distribution is smooth, unimodal, and roughly bell-shaped.
It is always a good idea to check the default behavior by using ``bw_adjust``
to increase or decrease the amount of smoothing.
Because the smoothing algorithm uses a Gaussian kernel, the estimated density
curve can extend to values that do not make sense for a particular dataset.
For example, the curve may be drawn over negative values when smoothing data
that are naturally positive. The ``cut`` and ``clip`` parameters can be used
to control the extent of the curve, but datasets that have many observations
close to a natural boundary may be better served by a different visualization
method.
Similar considerations apply when a dataset is naturally discrete or "spiky"
(containing many repeated observations of the same value). Kernel density
estimation will always produce a smooth curve, which would be misleading
in these situations.
The units on the density axis are a common source of confusion. While kernel
density estimation produces a probability distribution, the height of the curve
at each point gives a density, not a probability. A probability can be obtained
only by integrating the density across a range. The curve is normalized so
that the integral over all possible values is 1, meaning that the scale of
the density axis depends on the data values.
Examples
--------
.. include:: ../docstrings/kdeplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def ecdfplot(
data=None, *,
# Vector variables
x=None, y=None, hue=None, weights=None,
# Computation parameters
stat="proportion", complementary=False,
# Hue mapping parameters
palette=None, hue_order=None, hue_norm=None,
# Axes information
log_scale=None, legend=True, ax=None,
# Other appearance keywords
**kwargs,
):
ecdfplot.__doc__ = """\
Plot empirical cumulative distribution functions.
An ECDF represents the proportion or count of observations falling below each
unique value in a dataset. Compared to a histogram or density plot, it has the
advantage that each observation is visualized directly, meaning that there are
no binning or smoothing parameters that need to be adjusted. It also aids direct
comparisons between multiple distributions. A downside is that the relationship
between the appearance of the plot and the basic properties of the distribution
(such as its central tendency, variance, and the presence of any bimodality)
may not be as intuitive.
More information is provided in the :ref:`user guide <tutorial_ecdf>`.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
If provided, weight the contribution of the corresponding data points
towards the cumulative distribution using these values.
{params.ecdf.stat}
{params.ecdf.complementary}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.kdeplot}
{seealso.rugplot}
Examples
--------
.. include:: ../docstrings/ecdfplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def rugplot(
data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,
palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs
):
rugplot.__doc__ = """\
Plot marginal distributions by drawing ticks along the x and y axes.
This function is intended to complement other plots by showing the location
of individual observations in an unobtrusive way.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
height : float
Proportion of axes extent covered by each rug element. Can be negative.
expand_margins : bool
If True, increase the axes margins by the height of the rug to avoid
overlap with other elements.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
legend : bool
If False, do not add a legend for semantic variables.
{params.core.ax}
kwargs
Other keyword arguments are passed to
:meth:`matplotlib.collections.LineCollection`
Returns
-------
{returns.ax}
Examples
--------
.. include:: ../docstrings/rugplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
class KDE:
def __init__(
self, *,
bw_method=None,
bw_adjust=1,
gridsize=200,
cut=3,
clip=None,
cumulative=False,
):
def _define_support_grid(self, x, bw, cut, clip, gridsize):
def _define_support_univariate(self, x, weights):
def _define_support_bivariate(self, x1, x2, weights):
def define_support(self, x1, x2=None, weights=None, cache=True):
def _fit(self, fit_data, weights=None):
def _eval_univariate(self, x, weights=None):
def _eval_bivariate(self, x1, x2, weights=None):
def __call__(self, x1, x2=None, weights=None):
class Histogram:
def __init__(
self,
stat="count",
bins="auto",
binwidth=None,
binrange=None,
discrete=False,
cumulative=False,
):
def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):
def define_bin_params(self, x1, x2=None, weights=None, cache=True):
def _eval_bivariate(self, x1, x2, weights):
def _eval_univariate(self, x, weights):
def __call__(self, x1, x2=None, weights=None):
class ECDF:
def __init__(self, stat="proportion", complementary=False):
def _eval_bivariate(self, x1, x2, weights):
def _eval_univariate(self, x, weights):
def __call__(self, x1, x2=None, weights=None):
class FacetGrid(Grid):
def __init__(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None,
):
def facet_data(self):
def map(self, func, *args, **kwargs):
def map_dataframe(self, func, *args, **kwargs):
def _facet_color(self, hue_index, kw_color):
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
def _finalize_grid(self, axlabels):
def facet_axis(self, row_i, col_j, modify_state=True):
def despine(self, **kwargs):
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
def set_xticklabels(self, labels=None, step=None, **kwargs):
def set_yticklabels(self, labels=None, **kwargs):
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
def axes(self):
def ax(self):
def axes_dict(self):
def _inner_axes(self):
def _left_axes(self):
def _not_left_axes(self):
def _bottom_axes(self):
def _not_bottom_axes(self):
def _check_argument(param, options, value):
def _assign_default_kwargs(kws, call_func, source_func):
def displot(
    data=None, *,
    # Vector variables
    x=None, y=None, hue=None, row=None, col=None, weights=None,
    # Other plot parameters
    kind="hist", rug=False, rug_kws=None, log_scale=None, legend=True,
    # Hue-mapping parameters
    palette=None, hue_order=None, hue_norm=None, color=None,
    # Faceting parameters
    col_wrap=None, row_order=None, col_order=None,
    height=5, aspect=1, facet_kws=None,
    **kwargs,
):
    """Figure-level interface for drawing distribution plots onto a FacetGrid.

    ``kind`` selects the representation ("hist", "kde", or "ecdf"); a rug of
    individual observations can be layered onto any of them with ``rug=True``.
    Faceting is controlled by ``row``/``col``. Returns the :class:`FacetGrid`
    that hosts the plot(s).
    """
    p = _DistributionFacetPlotter(
        data=data,
        variables=_DistributionFacetPlotter.get_semantics(locals())
    )

    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

    _check_argument("kind", ["hist", "kde", "ecdf"], kind)

    # --- Initialize the FacetGrid object

    # Check for attempt to plot onto specific axes and warn
    if "ax" in kwargs:
        msg = (
            "`displot` is a figure-level function and does not accept "
            "the ax= parameter. You may wish to try {}plot.".format(kind)
        )
        warnings.warn(msg, UserWarning)
        kwargs.pop("ax")

    for var in ["row", "col"]:
        # Handle faceting variables that lack name information
        if var in p.variables and p.variables[var] is None:
            p.variables[var] = f"_{var}_"

    # Adapt the plot_data dataframe for use with FacetGrid
    grid_data = p.plot_data.rename(columns=p.variables)
    # Drop duplicated columns (e.g. the same vector assigned to two roles)
    grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]

    col_name = p.variables.get("col")
    row_name = p.variables.get("row")

    if facet_kws is None:
        facet_kws = {}

    g = FacetGrid(
        data=grid_data, row=row_name, col=col_name,
        col_wrap=col_wrap, row_order=row_order,
        col_order=col_order, height=height,
        aspect=aspect,
        **facet_kws,
    )

    # Now attach the axes object to the plotter object
    if kind == "kde":
        allowed_types = ["numeric", "datetime"]
    else:
        allowed_types = None
    p._attach(g, allowed_types=allowed_types, log_scale=log_scale)

    # Check for a specification that lacks x/y data and return early
    if not p.has_xy_data:
        return g

    if color is None and hue is None:
        color = "C0"
    # XXX else warn if hue is not None?

    kwargs["legend"] = legend

    # --- Draw the plots

    if kind == "hist":

        hist_kws = kwargs.copy()

        # Extract the parameters that will go directly to Histogram
        estimate_defaults = {}
        _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)

        estimate_kws = {}
        for key, default_val in estimate_defaults.items():
            estimate_kws[key] = hist_kws.pop(key, default_val)

        # Handle derivative defaults
        if estimate_kws["discrete"] is None:
            estimate_kws["discrete"] = p._default_discrete()

        hist_kws["estimate_kws"] = estimate_kws

        hist_kws.setdefault("color", color)

        if p.univariate:
            _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)
            p.plot_univariate_histogram(**hist_kws)
        else:
            _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)
            p.plot_bivariate_histogram(**hist_kws)

    elif kind == "kde":

        kde_kws = kwargs.copy()

        # Extract the parameters that will go directly to KDE
        estimate_defaults = {}
        _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)

        estimate_kws = {}
        for key, default_val in estimate_defaults.items():
            estimate_kws[key] = kde_kws.pop(key, default_val)

        kde_kws["estimate_kws"] = estimate_kws
        kde_kws["color"] = color

        if p.univariate:
            _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)
            p.plot_univariate_density(**kde_kws)
        else:
            _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)
            p.plot_bivariate_density(**kde_kws)

    elif kind == "ecdf":

        ecdf_kws = kwargs.copy()

        # Extract the parameters that will go directly to the estimator
        estimate_kws = {}
        estimate_defaults = {}
        _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)
        for key, default_val in estimate_defaults.items():
            estimate_kws[key] = ecdf_kws.pop(key, default_val)

        ecdf_kws["estimate_kws"] = estimate_kws
        ecdf_kws["color"] = color

        if p.univariate:
            _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)
            p.plot_univariate_ecdf(**ecdf_kws)
        else:
            raise NotImplementedError("Bivariate ECDF plots are not implemented")

    # All plot kinds can include a rug
    if rug:
        # TODO with expand_margins=True, each facet expands margins... annoying!
        if rug_kws is None:
            rug_kws = {}
        _assign_default_kwargs(rug_kws, p.plot_rug, rugplot)
        rug_kws["legend"] = False
        if color is not None:
            rug_kws["color"] = color
        p.plot_rug(**rug_kws)

    # Call FacetGrid annotation methods
    # Note that the legend is currently set inside the plotting method
    g.set_axis_labels(
        x_var=p.variables.get("x", g.axes.flat[0].get_xlabel()),
        y_var=p.variables.get("y", g.axes.flat[0].get_ylabel()),
    )
    g.set_titles()
    g.tight_layout()

    # Expose a DataFrame on the grid: for long-form input, merge the user's
    # data with the internally-derived columns; for wide-form input, rename
    # the internal columns back to user-facing names.
    if data is not None and (x is not None or y is not None):
        if not isinstance(data, pd.DataFrame):
            data = pd.DataFrame(data)
        g.data = pd.merge(
            data,
            g.data[g.data.columns.difference(data.columns)],
            left_index=True,
            right_index=True,
        )
    else:
        wide_cols = {
            k: f"_{k}_" if v is None else v for k, v in p.variables.items()
        }
        g.data = p.plot_data.rename(columns=wide_cols)

    return g
171,776 | from numbers import Number
from functools import partial
import math
import textwrap
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as tx
from matplotlib.colors import to_rgba
from matplotlib.collections import LineCollection
from ._oldcore import (
VectorPlotter,
)
from ._statistics import ECDF, Histogram, KDE
from ._stats.counting import Hist
from .axisgrid import (
FacetGrid,
_facet_docs,
)
from .utils import (
remove_na,
_kde_support,
_normalize_kwargs,
_check_argument,
_assign_default_kwargs,
_default_color,
)
from .palettes import color_palette
from .external import husl
from .external.kde import gaussian_kde
from ._docstrings import (
DocstringComponents,
_core_docs,
)
def kdeplot(
    data=None, *, x=None, y=None, hue=None, weights=None,
    palette=None, hue_order=None, hue_norm=None, color=None, fill=None,
    multiple="layer", common_norm=True, common_grid=False, cumulative=False,
    bw_method="scott", bw_adjust=1, warn_singular=True, log_scale=None,
    levels=10, thresh=.05, gridsize=200, cut=3, clip=None,
    legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,
    **kwargs,
):
    """Plot univariate or bivariate distributions using kernel density estimation.

    The full user-facing parameter documentation is attached via the
    ``kdeplot.__doc__`` assignment that follows this definition.
    """
    # --- Start with backwards compatability for versions < 0.11.0 ----------------

    # Handle (past) deprecation of `data2`: the parameter was removed, so
    # passing it must be a hard error.
    if "data2" in kwargs:
        msg = "`data2` has been removed (replaced by `y`); please update your code."
        # Fix: previously the exception was instantiated but never raised,
        # so `data2` was silently ignored instead of producing an error.
        raise TypeError(msg)

    # Handle deprecation of `vertical`: re-route the data to x/y and warn.
    vertical = kwargs.pop("vertical", None)
    if vertical is not None:
        if vertical:
            action_taken = "assigning data to `y`."
            if x is None:
                data, y = y, data
            else:
                x, y = y, x
        else:
            action_taken = "assigning data to `x`."
        msg = textwrap.dedent(f"""\n
        The `vertical` parameter is deprecated; {action_taken}
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Handle deprecation of `bw`: translate to `bw_method` and warn.
    bw = kwargs.pop("bw", None)
    if bw is not None:
        msg = textwrap.dedent(f"""\n
        The `bw` parameter is deprecated in favor of `bw_method` and `bw_adjust`.
        Setting `bw_method={bw}`, but please see the docs for the new parameters
        and update your code. This will become an error in seaborn v0.13.0.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)
        bw_method = bw

    # Handle deprecation of `kernel`: only the Gaussian kernel is supported.
    if kwargs.pop("kernel", None) is not None:
        msg = textwrap.dedent("""\n
        Support for alternate kernels has been removed; using Gaussian kernel.
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Handle deprecation of shade_lowest: translate to `thresh` and warn.
    shade_lowest = kwargs.pop("shade_lowest", None)
    if shade_lowest is not None:
        if shade_lowest:
            thresh = 0
        # Fix: closing backtick was missing around `thresh={thresh}` in the
        # warning message.
        msg = textwrap.dedent(f"""\n
        `shade_lowest` has been replaced by `thresh`; setting `thresh={thresh}`.
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Handle "soft" deprecation of shade `shade` is not really the right
    # terminology here, but unlike some of the other deprecated parameters it
    # is probably very commonly used and much hard to remove. This is therefore
    # going to be a longer process where, first, `fill` will be introduced and
    # be used throughout the documentation. In 0.12, when kwarg-only
    # enforcement hits, we can remove the shade/shade_lowest out of the
    # function signature all together and pull them out of the kwargs. Then we
    # can actually fire a FutureWarning, and eventually remove.
    shade = kwargs.pop("shade", None)
    if shade is not None:
        fill = shade
        msg = textwrap.dedent(f"""\n
        `shade` is now deprecated in favor of `fill`; setting `fill={shade}`.
        This will become an error in seaborn v0.14.0; please update your code.
        """)
        warnings.warn(msg, FutureWarning, stacklevel=2)

    # Handle `n_levels`
    # This was never in the formal API but it was processed, and appeared in an
    # example. We can treat as an alias for `levels` now and deprecate later.
    levels = kwargs.pop("n_levels", levels)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #

    # NOTE: get_semantics introspects locals(), so the parameter names above
    # must not be renamed.
    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals()),
    )

    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

    if ax is None:
        ax = plt.gca()

    p._attach(ax, allowed_types=["numeric", "datetime"], log_scale=log_scale)

    # Resolve the default color against the artist that will draw the plot.
    method = ax.fill_between if fill else ax.plot
    color = _default_color(method, hue, color, kwargs)

    if not p.has_xy_data:
        return ax

    # Pack the kwargs for statistics.KDE
    estimate_kws = dict(
        bw_method=bw_method,
        bw_adjust=bw_adjust,
        gridsize=gridsize,
        cut=cut,
        clip=clip,
        cumulative=cumulative,
    )

    if p.univariate:

        plot_kws = kwargs.copy()

        p.plot_univariate_density(
            multiple=multiple,
            common_norm=common_norm,
            common_grid=common_grid,
            fill=fill,
            color=color,
            legend=legend,
            warn_singular=warn_singular,
            estimate_kws=estimate_kws,
            **plot_kws,
        )

    else:

        p.plot_bivariate_density(
            common_norm=common_norm,
            fill=fill,
            levels=levels,
            thresh=thresh,
            legend=legend,
            color=color,
            warn_singular=warn_singular,
            cbar=cbar,
            cbar_ax=cbar_ax,
            cbar_kws=cbar_kws,
            estimate_kws=estimate_kws,
            **kwargs,
        )

    return ax
# Attach the public docstring, interpolating the shared parameter/return/
# see-also fragments from the module-level docstring components.
# Fix: the bivariate-fill cross-reference was `matplotlib.axes.contourf`,
# which is not a resolvable target; it should be `matplotlib.axes.Axes.contourf`.
kdeplot.__doc__ = """\
Plot univariate or bivariate distributions using kernel density estimation.
A kernel density estimate (KDE) plot is a method for visualizing the
distribution of observations in a dataset, analogous to a histogram. KDE
represents the data using a continuous probability density curve in one or
more dimensions.
The approach is explained further in the :ref:`user guide <tutorial_kde>`.
Relative to a histogram, KDE can produce a plot that is less cluttered and
more interpretable, especially when drawing multiple distributions. But it
has the potential to introduce distortions if the underlying distribution is
bounded or not smooth. Like a histogram, the quality of the representation
also depends on the selection of good smoothing parameters.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
If provided, weight the kernel density estimation using these values.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
fill : bool or None
If True, fill in the area under univariate density curves or between
bivariate contours. If None, the default depends on ``multiple``.
{params.dist.multiple}
common_norm : bool
If True, scale each conditional density by the number of observations
such that the total area under all densities sums to 1. Otherwise,
normalize each density independently.
common_grid : bool
If True, use the same evaluation grid for each kernel density estimate.
Only relevant with univariate data.
{params.kde.cumulative}
{params.kde.bw_method}
{params.kde.bw_adjust}
warn_singular : bool
If True, issue a warning when trying to estimate the density of data
with zero variance.
{params.dist.log_scale}
levels : int or vector
Number of contour levels or values to draw contours at. A vector argument
must have increasing values in [0, 1]. Levels correspond to iso-proportions
of the density: e.g., 20% of the probability mass will lie below the
contour drawn for 0.2. Only relevant with bivariate data.
thresh : number in [0, 1]
Lowest iso-proportion level at which to draw a contour line. Ignored when
``levels`` is a vector. Only relevant with bivariate data.
gridsize : int
Number of points on each dimension of the evaluation grid.
{params.kde.cut}
{params.kde.clip}
{params.dist.legend}
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.ax}
kwargs
Other keyword arguments are passed to one of the following matplotlib
functions:
- :meth:`matplotlib.axes.Axes.plot` (univariate, ``fill=False``),
- :meth:`matplotlib.axes.Axes.fill_between` (univariate, ``fill=True``),
- :meth:`matplotlib.axes.Axes.contour` (bivariate, ``fill=False``),
- :meth:`matplotlib.axes.Axes.contourf` (bivariate, ``fill=True``).
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.ecdfplot}
{seealso.jointplot}
{seealso.violinplot}
Notes
-----
The *bandwidth*, or standard deviation of the smoothing kernel, is an
important parameter. Misspecification of the bandwidth can produce a
distorted representation of the data. Much like the choice of bin width in a
histogram, an over-smoothed curve can erase true features of a
distribution, while an under-smoothed curve can create false features out of
random variability. The rule-of-thumb that sets the default bandwidth works
best when the true distribution is smooth, unimodal, and roughly bell-shaped.
It is always a good idea to check the default behavior by using ``bw_adjust``
to increase or decrease the amount of smoothing.
Because the smoothing algorithm uses a Gaussian kernel, the estimated density
curve can extend to values that do not make sense for a particular dataset.
For example, the curve may be drawn over negative values when smoothing data
that are naturally positive. The ``cut`` and ``clip`` parameters can be used
to control the extent of the curve, but datasets that have many observations
close to a natural boundary may be better served by a different visualization
method.
Similar considerations apply when a dataset is naturally discrete or "spiky"
(containing many repeated observations of the same value). Kernel density
estimation will always produce a smooth curve, which would be misleading
in these situations.
The units on the density axis are a common source of confusion. While kernel
density estimation produces a probability distribution, the height of the curve
at each point gives a density, not a probability. A probability can be obtained
only by integrating the density across a range. The curve is normalized so
that the integral over all possible values is 1, meaning that the scale of
the density axis depends on the data values.
Examples
--------
.. include:: ../docstrings/kdeplot.rst
""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def rugplot(
    data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,
    palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs
):
    """Plot marginal distributions by drawing ticks along the x and y axes.

    The full user-facing parameter documentation is attached via the
    ``rugplot.__doc__`` assignment that follows this definition.
    """
    # A note: I think it would make sense to add multiple= to rugplot and allow
    # rugs for different hue variables to be shifted orthogonal to the data axis
    # But is this stacking, or dodging?

    # A note: if we want to add a style semantic to rugplot,
    # we could make an option that draws the rug using scatterplot

    # A note, it would also be nice to offer some kind of histogram/density
    # rugplot, since alpha blending doesn't work great in the large n regime

    # --- Start with backwards compatability for versions < 0.11.0 ----------------

    # `a` was the old positional data parameter; remap it onto `data` and warn.
    a = kwargs.pop("a", None)
    axis = kwargs.pop("axis", None)

    if a is not None:
        data = a
        msg = textwrap.dedent("""\n
        The `a` parameter has been replaced; use `x`, `y`, and/or `data` instead.
        Please update your code; This will become an error in seaborn v0.13.0.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # `axis` used to select orientation; route `data` onto `x` or `y` instead.
    if axis is not None:
        if axis == "x":
            x = data
        elif axis == "y":
            y = data
        msg = textwrap.dedent(f"""\n
        The `axis` parameter has been deprecated; use the `{axis}` parameter instead.
        Please update your code; this will become an error in seaborn v0.13.0.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # `vertical=True` used to mean "rug along the y axis"; translate accordingly.
    vertical = kwargs.pop("vertical", None)
    if vertical is not None:
        if vertical:
            action_taken = "assigning data to `y`."
            if x is None:
                data, y = y, data
            else:
                x, y = y, x
        else:
            action_taken = "assigning data to `x`."
        msg = textwrap.dedent(f"""\n
        The `vertical` parameter is deprecated; {action_taken}
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #

    # `weights` is not a rugplot parameter; it is defined here so the semantic
    # lookup over locals() below finds the name — presumably expected by
    # get_semantics; TODO confirm against _DistributionPlotter.
    weights = None
    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals()),
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

    if ax is None:
        ax = plt.gca()

    p._attach(ax)

    # Accept matplotlib's `c` alias for color, then resolve the default color.
    color = kwargs.pop("color", kwargs.pop("c", None))
    kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)

    if not p.has_xy_data:
        return ax

    p.plot_rug(height, expand_margins, legend, **kwargs)

    return ax
# Attach the public docstring, interpolating the shared parameter/return
# fragments from the module-level docstring components.
rugplot.__doc__ = """\
Plot marginal distributions by drawing ticks along the x and y axes.
This function is intended to complement other plots by showing the location
of individual observations in an unobtrusive way.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
height : float
Proportion of axes extent covered by each rug element. Can be negative.
expand_margins : bool
If True, increase the axes margins by the height of the rug to avoid
overlap with other elements.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
legend : bool
If False, do not add a legend for semantic variables.
{params.core.ax}
kwargs
Other keyword arguments are passed to
:meth:`matplotlib.collections.LineCollection`
Returns
-------
{returns.ax}
Examples
--------
.. include:: ../docstrings/rugplot.rst
""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From https://stats.stackexchange.com/questions/798/
a = np.asarray(a)
if len(a) < 2:
return 1
iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))
h = 2 * iqr / (len(a) ** (1 / 3))
# fall back to sqrt(a) bins if iqr is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
def remove_na(vector):
    """Helper method for removing null values from data vectors.

    Parameters
    ----------
    vector : vector object
        Must implement boolean masking with [] subscript syntax.

    Returns
    -------
    clean : same type as ``vector``
        Vector of data with null values removed. May be a copy or a view.

    """
    not_null = pd.notnull(vector)
    return vector[not_null]
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
support = np.linspace(support_min, support_max, gridsize)
return support
class gaussian_kde:
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
weights : array_like, optional
weights of datapoints. This must be the same shape as dataset.
If None (default), the samples are assumed to be equally weighted
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
neff : int
Effective number of datapoints.
.. versionadded:: 1.2.0
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
In the case of unequally weighted points, `scotts_factor` becomes::
neff**(-1./(d+4)),
with ``neff`` the effective number of datapoints.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
or in the case of unequally weighted points::
(neff * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
With a set of weighted samples, the effective number of datapoints ``neff``
is defined by::
neff = sum(weights)^2 / sum(weights^2)
as detailed in [5]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
.. [5] Gray P. G., 1969, Journal of the Royal Statistical Society.
Series A (General), 132, 272
"""
def __init__(self, dataset, bw_method=None, weights=None):
    """Initialize the KDE from a dataset.

    Parameters
    ----------
    dataset : array_like
        Datapoints to estimate from; coerced to a 2-D ``(d, n)`` array.
    bw_method : str, scalar or callable, optional
        Bandwidth selection method; forwarded to `set_bandwidth`.
    weights : array_like, optional
        Per-datapoint weights; normalized here to sum to 1.
    """
    self.dataset = atleast_2d(asarray(dataset))
    # A density estimate needs more than one datapoint.
    if not self.dataset.size > 1:
        raise ValueError("`dataset` input should have multiple elements.")
    self.d, self.n = self.dataset.shape

    if weights is not None:
        self._weights = atleast_1d(weights).astype(float)
        self._weights /= sum(self._weights)  # normalize to unit sum
        if self.weights.ndim != 1:
            raise ValueError("`weights` input should be one-dimensional.")
        if len(self._weights) != self.n:
            raise ValueError("`weights` input should be of length n")
        # Effective sample size: with weights normalized to sum to 1 this is
        # sum(w)^2 / sum(w^2) = 1 / sum(w^2) (see class Notes, ref [5]).
        self._neff = 1/sum(self._weights**2)

    # Selects the bandwidth rule and computes the kernel covariance.
    self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
    """Evaluate the estimated pdf on a set of points.

    Parameters
    ----------
    points : (# of dimensions, # of points)-array
        Alternatively, a (# of dimensions,) vector can be passed in and
        treated as a single point.

    Returns
    -------
    values : (# of points,)-array
        The values at each point.

    Raises
    ------
    ValueError : if the dimensionality of the input points is different than
                 the dimensionality of the KDE.
    """
    points = atleast_2d(asarray(points))

    d, m = points.shape
    if d != self.d:
        if d == 1 and m == self.d:
            # points was passed in as a row vector
            points = reshape(points, (self.d, 1))
            m = 1
        else:
            msg = f"points have dimension {d}, dataset has dimension {self.d}"
            raise ValueError(msg)

    # Use a dtype wide enough for both the kernel covariance and the points.
    output_dtype = np.common_type(self.covariance, points)
    result = zeros((m,), dtype=output_dtype)

    # "Whiten" data and query points with a Cholesky factor of inv_cov so the
    # Gaussian kernel reduces to exp(-||diff||^2 / 2) in the scaled space.
    whitening = linalg.cholesky(self.inv_cov)
    scaled_dataset = dot(whitening, self.dataset)
    scaled_points = dot(whitening, points)

    # Loop over whichever of (datapoints, query points) is smaller.
    if m >= self.n:
        # there are more points than data, so loop over data
        for i in range(self.n):
            diff = scaled_dataset[:, i, newaxis] - scaled_points
            energy = sum(diff * diff, axis=0) / 2.0
            result += self.weights[i]*exp(-energy)
    else:
        # loop over points
        for i in range(m):
            diff = scaled_dataset - scaled_points[:, i, newaxis]
            energy = sum(diff * diff, axis=0) / 2.0
            result[i] = sum(exp(-energy)*self.weights, axis=0)

    # Normalize by the Gaussian normalization constant sqrt(det(2*pi*Sigma)).
    result = result / self._norm_factor

    return result

# Calling the instance directly evaluates the estimated pdf.
__call__ = evaluate
def scotts_factor(self):
    """Compute the bandwidth factor given by Scott's rule of thumb.

    Returns
    -------
    s : float
        ``neff ** (-1 / (d + 4))``, with ``neff`` the effective number
        of datapoints and ``d`` the number of dimensions.
    """
    exponent = -1. / (self.d + 4)
    return power(self.neff, exponent)
def silverman_factor(self):
    """Compute the bandwidth factor given by Silverman's rule of thumb.

    Returns
    -------
    s : float
        ``(neff * (d + 2) / 4) ** (-1 / (d + 4))``.
    """
    base = self.neff * (self.d + 2.0) / 4.0
    return power(base, -1. / (self.d + 4))
# Default method to calculate bandwidth, can be overwritten by subclass
# (set_bandwidth also rebinds this attribute per-instance).
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
    """Select the bandwidth rule and recompute the kernel covariance.

    Every subsequent density evaluation uses the bandwidth chosen here.

    Parameters
    ----------
    bw_method : str, scalar or callable, optional
        One of ``'scott'`` or ``'silverman'``, a scalar used directly as
        `kde.factor`, or a callable taking this instance and returning a
        scalar. ``None`` (default) keeps the current
        `kde.covariance_factor` unchanged.

    Notes
    -----
    .. versionadded:: 0.11
    """
    if bw_method is not None:
        if bw_method == 'scott':
            self.covariance_factor = self.scotts_factor
        elif bw_method == 'silverman':
            self.covariance_factor = self.silverman_factor
        elif np.isscalar(bw_method) and not isinstance(bw_method, str):
            # A bare number is wrapped so covariance_factor stays callable.
            self._bw_method = 'use constant'
            self.covariance_factor = lambda: bw_method
        elif callable(bw_method):
            self._bw_method = bw_method
            self.covariance_factor = lambda: self._bw_method(self)
        else:
            raise ValueError(
                "`bw_method` should be 'scott', 'silverman', a scalar "
                "or a callable.")

    self._compute_covariance()
def _compute_covariance(self):
    """Computes the covariance matrix for each Gaussian kernel using
    covariance_factor().
    """
    self.factor = self.covariance_factor()
    # Cache covariance and inverse covariance of the data; only computed the
    # first time, since the dataset (and weights) are fixed after __init__.
    if not hasattr(self, '_data_inv_cov'):
        self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,
                                               bias=False,
                                               aweights=self.weights))
        self._data_inv_cov = linalg.inv(self._data_covariance)

    # Kernel covariance is the (cached) data covariance scaled by the squared
    # bandwidth factor, so changing the bandwidth only redoes the scaling.
    self.covariance = self._data_covariance * self.factor**2
    self.inv_cov = self._data_inv_cov / self.factor**2
    # Gaussian normalization constant used by evaluate().
    self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))
def pdf(self, x):
    """
    Evaluate the estimated pdf on a provided set of points.

    Notes
    -----
    Thin alias around `gaussian_kde.evaluate`; see that method's
    docstring for details on accepted shapes and raised errors.
    """
    evaluate = self.evaluate
    return evaluate(x)
@property
def weights(self):
    """Per-datapoint weights, normalized to sum to 1.

    The rest of the class reads this as an attribute (e.g.
    ``self.weights.ndim`` in ``__init__`` and ``self.weights[i]`` in
    ``evaluate``), so it must be a property, not a plain method; the
    decorator was missing. Lazily defaults to uniform weights (``1/n``
    each) when none were supplied to ``__init__``.
    """
    try:
        return self._weights
    except AttributeError:
        # No explicit weights: each of the n datapoints counts equally.
        self._weights = ones(self.n)/self.n
        return self._weights
@property
def neff(self):
    """Effective number of datapoints, ``1 / sum(weights**2)``.

    Read as an attribute by ``scotts_factor`` and ``silverman_factor``
    (``self.neff``), so it must be a property, not a plain method; the
    decorator was missing. Equals ``n`` for uniform weights. Computed
    lazily and cached in ``_neff``.
    """
    try:
        return self._neff
    except AttributeError:
        self._neff = 1/sum(self.weights**2)
        return self._neff
The provided code snippet includes necessary dependencies for implementing the `distplot` function. Write a Python function `def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None, hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None, color=None, vertical=False, norm_hist=False, axlabel=None, label=None, ax=None, x=None)` to solve the following problem:
DEPRECATED This function has been deprecated and will be removed in seaborn v0.14.0. It has been replaced by :func:`histplot` and :func:`displot`, two functions with a modern API and many more capabilities. For a guide to updating, please see this notebook: https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751
Here is the function:
def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,
             hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
             color=None, vertical=False, norm_hist=False, axlabel=None,
             label=None, ax=None, x=None):
    """
    DEPRECATED

    This function has been deprecated and will be removed in seaborn v0.14.0.
    It has been replaced by :func:`histplot` and :func:`displot`, two functions
    with a modern API and many more capabilities.

    For a guide to updating, please see this notebook:
    https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751
    """
    # Point the deprecation message at the most relevant axes-level function
    # for how the caller invoked us.
    if kde and not hist:
        axes_level_suggestion = (
            "`kdeplot` (an axes-level function for kernel density plots)"
        )
    else:
        axes_level_suggestion = (
            "`histplot` (an axes-level function for histograms)"
        )

    msg = textwrap.dedent(f"""
    `distplot` is a deprecated function and will be removed in seaborn v0.14.0.
    Please adapt your code to use either `displot` (a figure-level function with
    similar flexibility) or {axes_level_suggestion}.
    For a guide to updating your code to use the new functions, please see
    https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751
    """)
    warnings.warn(msg, UserWarning, stacklevel=2)

    if ax is None:
        ax = plt.gca()

    # Intelligently label the support axis
    label_ax = bool(axlabel)
    if axlabel is None and hasattr(a, "name"):
        axlabel = a.name
        if axlabel is not None:
            label_ax = True

    # Support new-style API
    if x is not None:
        a = x

    # Make a a 1-d float array
    a = np.asarray(a, float)
    if a.ndim > 1:
        a = a.squeeze()

    # Drop null values from array
    a = remove_na(a)

    # Decide if the hist is normed: any overlaid density curve forces a
    # normalized histogram so the scales are comparable.
    norm_hist = norm_hist or kde or (fit is not None)

    # Handle dictionary defaults (copies, so callers' dicts are not mutated)
    hist_kws = {} if hist_kws is None else hist_kws.copy()
    kde_kws = {} if kde_kws is None else kde_kws.copy()
    rug_kws = {} if rug_kws is None else rug_kws.copy()
    fit_kws = {} if fit_kws is None else fit_kws.copy()

    # Get the color from the current color cycle by drawing and immediately
    # removing a throwaway line.
    if color is None:
        if vertical:
            line, = ax.plot(0, a.mean())
        else:
            line, = ax.plot(a.mean(), 0)
        color = line.get_color()
        line.remove()

    # Plug the label into the right kwarg dictionary (first enabled artist)
    if label is not None:
        if hist:
            hist_kws["label"] = label
        elif kde:
            kde_kws["label"] = label
        elif rug:
            rug_kws["label"] = label
        elif fit:
            fit_kws["label"] = label

    if hist:
        if bins is None:
            bins = min(_freedman_diaconis_bins(a), 50)
        hist_kws.setdefault("alpha", 0.4)
        hist_kws.setdefault("density", norm_hist)

        orientation = "horizontal" if vertical else "vertical"
        hist_color = hist_kws.pop("color", color)
        ax.hist(a, bins, orientation=orientation,
                color=hist_color, **hist_kws)
        # Restore a caller-specified color so the dict round-trips unchanged.
        if hist_color != color:
            hist_kws["color"] = hist_color

    axis = "y" if vertical else "x"

    if kde:
        kde_color = kde_kws.pop("color", color)
        kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)
        if kde_color != color:
            kde_kws["color"] = kde_color

    if rug:
        rug_color = rug_kws.pop("color", color)
        rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)
        if rug_color != color:
            rug_kws["color"] = rug_color

    # Overlay the pdf of a parametric distribution fit to the data.
    if fit is not None:

        def pdf(x):
            # Closes over `params`, which is assigned below before the
            # first call (late binding).
            return fit.pdf(x, *params)

        fit_color = fit_kws.pop("color", "#282828")
        gridsize = fit_kws.pop("gridsize", 200)
        cut = fit_kws.pop("cut", 3)
        clip = fit_kws.pop("clip", (-np.inf, np.inf))
        # Scott's-rule bandwidth sets the extent of the support grid.
        bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)
        x = _kde_support(a, bw, gridsize, cut, clip)
        params = fit.fit(a)
        y = pdf(x)
        if vertical:
            x, y = y, x
        ax.plot(x, y, color=fit_color, **fit_kws)
        if fit_color != "#282828":
            fit_kws["color"] = fit_color

    if label_ax:
        if vertical:
            ax.set_ylabel(axlabel)
        else:
            ax.set_xlabel(axlabel)

    return ax
171,777 | from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _BoxPlotter(_CategoricalPlotter):
    """Plotter that draws grouped (and optionally hue-nested) box plots
    on a matplotlib Axes, restyling matplotlib's artists to seaborn's look.
    """

    def __init__(self, x, y, hue, data, order, hue_order,
                 orient, color, palette, saturation,
                 width, dodge, fliersize, linewidth):
        # Data/color bookkeeping lives on the base class.
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)

        self.dodge = dodge
        self.width = width
        self.fliersize = fliersize

        # Fall back on matplotlib's configured default line width.
        if linewidth is None:
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth

    def draw_boxplot(self, ax, kws):
        """Use matplotlib to draw a boxplot on an Axes."""
        vert = self.orient == "v"

        # Pull the per-artist property dicts (e.g. ``boxprops``) out of kws
        # so they are applied in restyle_boxplot rather than passed through.
        props = {}
        for obj in ["box", "whisker", "cap", "median", "flier"]:
            props[obj] = kws.pop(obj + "props", {})

        for i, group_data in enumerate(self.plot_data):

            if self.plot_hues is None:

                # Handle case where there is data at this level
                if group_data.size == 0:
                    continue

                # Draw a single box or a set of boxes
                # with a single level of grouping
                box_data = np.asarray(remove_na(group_data))

                # Handle case where there is no non-null data
                if box_data.size == 0:
                    continue

                artist_dict = ax.boxplot(box_data,
                                         vert=vert,
                                         patch_artist=True,
                                         positions=[i],
                                         widths=self.width,
                                         **kws)
                color = self.colors[i]
                self.restyle_boxplot(artist_dict, color, props)
            else:
                # Draw nested groups of boxes
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):

                    # Add a legend for this hue level
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)

                    # Handle case where there is data at this level
                    if group_data.size == 0:
                        continue

                    hue_mask = self.plot_hues[i] == hue_level
                    box_data = np.asarray(remove_na(group_data[hue_mask]))

                    # Handle case where there is no non-null data
                    if box_data.size == 0:
                        continue

                    center = i + offsets[j]
                    artist_dict = ax.boxplot(box_data,
                                             vert=vert,
                                             patch_artist=True,
                                             positions=[center],
                                             widths=self.nested_width,
                                             **kws)
                    self.restyle_boxplot(artist_dict, self.colors[j], props)
                    # Add legend data, but just for one set of boxes

    def restyle_boxplot(self, artist_dict, color, props):
        """Take a drawn matplotlib boxplot and make it look nice."""
        # Seaborn defaults first, then any user-supplied ``*props`` on top.
        for box in artist_dict["boxes"]:
            box.update(dict(facecolor=color,
                            zorder=.9,
                            edgecolor=self.gray,
                            linewidth=self.linewidth))
            box.update(props["box"])
        for whisk in artist_dict["whiskers"]:
            whisk.update(dict(color=self.gray,
                              linewidth=self.linewidth,
                              linestyle="-"))
            whisk.update(props["whisker"])
        for cap in artist_dict["caps"]:
            cap.update(dict(color=self.gray,
                            linewidth=self.linewidth))
            cap.update(props["cap"])
        for med in artist_dict["medians"]:
            med.update(dict(color=self.gray,
                            linewidth=self.linewidth))
            med.update(props["median"])
        for fly in artist_dict["fliers"]:
            fly.update(dict(markerfacecolor=self.gray,
                            marker="d",
                            markeredgecolor=self.gray,
                            markersize=self.fliersize))
            fly.update(props["flier"])

    def plot(self, ax, boxplot_kws):
        """Make the plot."""
        self.draw_boxplot(ax, boxplot_kws)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
def boxplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    orient=None, color=None, palette=None, saturation=.75, width=.8,
    dodge=True, fliersize=5, linewidth=None, whis=1.5, ax=None,
    **kwargs
):
    """Draw a box plot of the data onto *ax* (or the current Axes).

    Thin wrapper that builds a `_BoxPlotter`, forwards `whis` and any
    extra keyword arguments to matplotlib's boxplot, and returns the
    Axes the plot was drawn on.
    """
    drawer = _BoxPlotter(x, y, hue, data, order, hue_order,
                         orient, color, palette, saturation,
                         width, dodge, fliersize, linewidth)

    target = plt.gca() if ax is None else ax
    kwargs["whis"] = whis
    drawer.plot(target, kwargs)
    return target
171,778 | from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _ViolinPlotter(_CategoricalPlotter):
    """Plotter that estimates per-group (and per-hue) kernel densities and
    draws them as violins, with optional box/quartile/stick/point interiors.

    Fix in this revision: ``dwidth`` is read as an attribute throughout this
    class (e.g. ``density * self.dwidth`` in ``draw_violins`` and
    ``draw_to_density``), so it must be a ``@property``; the decorator was
    missing, which would make those expressions fail at runtime.
    """

    def __init__(self, x, y, hue, data, order, hue_order,
                 bw, cut, scale, scale_hue, gridsize,
                 width, inner, split, dodge, orient, linewidth,
                 color, palette, saturation):

        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)
        self.estimate_densities(bw, cut, scale, scale_hue, gridsize)

        self.gridsize = gridsize
        self.width = width
        self.dodge = dodge

        if inner is not None:
            if not any([inner.startswith("quart"),
                        inner.startswith("box"),
                        inner.startswith("stick"),
                        inner.startswith("point")]):
                err = f"Inner style '{inner}' not recognized"
                raise ValueError(err)
        self.inner = inner

        # Splitting interleaves two hue levels on one violin, so exactly
        # two levels are required.
        if split and self.hue_names is not None and len(self.hue_names) != 2:
            msg = "There must be exactly two hue levels to use `split`.'"
            raise ValueError(msg)
        self.split = split

        if linewidth is None:
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth

    def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
        """Find the support and density for all of the data."""
        # Initialize data structures to keep track of plotting data
        if self.hue_names is None:
            support = []
            density = []
            counts = np.zeros(len(self.plot_data))
            max_density = np.zeros(len(self.plot_data))
        else:
            support = [[] for _ in self.plot_data]
            density = [[] for _ in self.plot_data]
            size = len(self.group_names), len(self.hue_names)
            counts = np.zeros(size)
            max_density = np.zeros(size)

        for i, group_data in enumerate(self.plot_data):

            # Option 1: we have a single level of grouping
            # --------------------------------------------

            if self.plot_hues is None:

                # Strip missing datapoints
                kde_data = remove_na(group_data)

                # Handle special case of no data at this level
                if kde_data.size == 0:
                    support.append(np.array([]))
                    density.append(np.array([1.]))
                    counts[i] = 0
                    max_density[i] = 0
                    continue

                # Handle special case of a single unique datapoint
                elif np.unique(kde_data).size == 1:
                    support.append(np.unique(kde_data))
                    density.append(np.array([1.]))
                    counts[i] = 1
                    max_density[i] = 0
                    continue

                # Fit the KDE and get the used bandwidth size
                kde, bw_used = self.fit_kde(kde_data, bw)

                # Determine the support grid and get the density over it
                support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
                density_i = kde.evaluate(support_i)

                # Update the data structures with these results
                support.append(support_i)
                density.append(density_i)
                counts[i] = kde_data.size
                max_density[i] = density_i.max()

            # Option 2: we have nested grouping by a hue variable
            # ---------------------------------------------------

            else:
                for j, hue_level in enumerate(self.hue_names):

                    # Handle special case of no data at this category level
                    if not group_data.size:
                        support[i].append(np.array([]))
                        density[i].append(np.array([1.]))
                        counts[i, j] = 0
                        max_density[i, j] = 0
                        continue

                    # Select out the observations for this hue level
                    hue_mask = self.plot_hues[i] == hue_level

                    # Strip missing datapoints
                    kde_data = remove_na(group_data[hue_mask])

                    # Handle special case of no data at this level
                    if kde_data.size == 0:
                        support[i].append(np.array([]))
                        density[i].append(np.array([1.]))
                        counts[i, j] = 0
                        max_density[i, j] = 0
                        continue

                    # Handle special case of a single unique datapoint
                    elif np.unique(kde_data).size == 1:
                        support[i].append(np.unique(kde_data))
                        density[i].append(np.array([1.]))
                        counts[i, j] = 1
                        max_density[i, j] = 0
                        continue

                    # Fit the KDE and get the used bandwidth size
                    kde, bw_used = self.fit_kde(kde_data, bw)

                    # Determine the support grid and get the density over it
                    support_ij = self.kde_support(kde_data, bw_used,
                                                  cut, gridsize)
                    density_ij = kde.evaluate(support_ij)

                    # Update the data structures with these results
                    support[i].append(support_ij)
                    density[i].append(density_ij)
                    counts[i, j] = kde_data.size
                    max_density[i, j] = density_ij.max()

        # Scale the height of the density curve.
        # For a violinplot the density is non-quantitative.
        # The objective here is to scale the curves relative to 1 so that
        # they can be multiplied by the width parameter during plotting.

        if scale == "area":
            self.scale_area(density, max_density, scale_hue)

        elif scale == "width":
            self.scale_width(density)

        elif scale == "count":
            self.scale_count(density, counts, scale_hue)

        else:
            raise ValueError(f"scale method '{scale}' not recognized")

        # Set object attributes that will be used while plotting
        self.support = support
        self.density = density

    def fit_kde(self, x, bw):
        """Estimate a KDE for a vector of data with flexible bandwidth."""
        kde = gaussian_kde(x, bw)

        # Extract the numeric bandwidth from the KDE object
        bw_used = kde.factor

        # At this point, bw will be a numeric scale factor.
        # To get the actual bandwidth of the kernel, we multiply by the
        # unbiased standard deviation of the data, which we will use
        # elsewhere to compute the range of the support.
        bw_used = bw_used * x.std(ddof=1)

        return kde, bw_used

    def kde_support(self, x, bw, cut, gridsize):
        """Define a grid of support for the violin."""
        support_min = x.min() - bw * cut
        support_max = x.max() + bw * cut
        return np.linspace(support_min, support_max, gridsize)

    def scale_area(self, density, max_density, scale_hue):
        """Scale the relative area under the KDE curve.

        This essentially preserves the "standard" KDE scaling, but the
        resulting maximum density will be 1 so that the curve can be
        properly multiplied by the violin width.
        """
        if self.hue_names is None:
            for d in density:
                if d.size > 1:
                    d /= max_density.max()
        else:
            for i, group in enumerate(density):
                for d in group:
                    if scale_hue:
                        max = max_density[i].max()
                    else:
                        max = max_density.max()
                    if d.size > 1:
                        d /= max

    def scale_width(self, density):
        """Scale each density curve to the same height."""
        if self.hue_names is None:
            for d in density:
                d /= d.max()
        else:
            for group in density:
                for d in group:
                    d /= d.max()

    def scale_count(self, density, counts, scale_hue):
        """Scale each density curve by the number of observations."""
        if self.hue_names is None:
            if counts.max() == 0:
                d = 0  # NOTE(review): no-op rebinding, preserved from original
            else:
                for count, d in zip(counts, density):
                    d /= d.max()
                    d *= count / counts.max()
        else:
            for i, group in enumerate(density):
                for j, d in enumerate(group):
                    if counts[i].max() == 0:
                        d = 0  # NOTE(review): no-op rebinding, as original
                    else:
                        count = counts[i, j]
                        if scale_hue:
                            scaler = count / counts[i].max()
                        else:
                            scaler = count / counts.max()
                        d /= d.max()
                        d *= scaler

    @property
    def dwidth(self):
        """Half-width available to one violin, given dodging and splitting."""
        if self.hue_names is None or not self.dodge:
            return self.width / 2
        elif self.split:
            return self.width / 2
        else:
            return self.width / (2 * len(self.hue_names))

    def draw_violins(self, ax):
        """Draw the violins onto `ax`."""
        fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
        for i, group_data in enumerate(self.plot_data):

            kws = dict(edgecolor=self.gray, linewidth=self.linewidth)

            # Option 1: we have a single level of grouping
            # --------------------------------------------

            if self.plot_hues is None:

                support, density = self.support[i], self.density[i]

                # Handle special case of no observations in this bin
                if support.size == 0:
                    continue

                # Handle special case of a single observation
                elif support.size == 1:
                    val = support.item()
                    d = density.item()
                    self.draw_single_observation(ax, i, val, d)
                    continue

                # Draw the violin for this group
                grid = np.ones(self.gridsize) * i
                fill_func(support,
                          grid - density * self.dwidth,
                          grid + density * self.dwidth,
                          facecolor=self.colors[i],
                          **kws)

                # Draw the interior representation of the data
                if self.inner is None:
                    continue

                # Get a nan-free vector of datapoints
                violin_data = remove_na(group_data)

                # Draw box and whisker information
                if self.inner.startswith("box"):
                    self.draw_box_lines(ax, violin_data, i)

                # Draw quartile lines
                elif self.inner.startswith("quart"):
                    self.draw_quartiles(ax, violin_data, support, density, i)

                # Draw stick observations
                elif self.inner.startswith("stick"):
                    self.draw_stick_lines(ax, violin_data, support, density, i)

                # Draw point observations
                elif self.inner.startswith("point"):
                    self.draw_points(ax, violin_data, i)

            # Option 2: we have nested grouping by a hue variable
            # ---------------------------------------------------

            else:
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):

                    support, density = self.support[i][j], self.density[i][j]
                    kws["facecolor"] = self.colors[j]

                    # Add legend data, but just for one set of violins
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)

                    # Handle the special case where we have no observations
                    if support.size == 0:
                        continue

                    # Handle the special case where we have one observation
                    elif support.size == 1:
                        val = support.item()
                        d = density.item()
                        if self.split:
                            d = d / 2
                        at_group = i + offsets[j]
                        self.draw_single_observation(ax, at_group, val, d)
                        continue

                    # Option 2a: we are drawing a single split violin
                    # -----------------------------------------------

                    if self.split:

                        grid = np.ones(self.gridsize) * i
                        if j:
                            fill_func(support,
                                      grid,
                                      grid + density * self.dwidth,
                                      **kws)
                        else:
                            fill_func(support,
                                      grid - density * self.dwidth,
                                      grid,
                                      **kws)

                        # Draw the interior representation of the data
                        if self.inner is None:
                            continue

                        # Get a nan-free vector of datapoints
                        hue_mask = self.plot_hues[i] == hue_level
                        violin_data = remove_na(group_data[hue_mask])

                        # Draw quartile lines
                        if self.inner.startswith("quart"):
                            self.draw_quartiles(ax, violin_data,
                                                support, density, i,
                                                ["left", "right"][j])

                        # Draw stick observations
                        elif self.inner.startswith("stick"):
                            self.draw_stick_lines(ax, violin_data,
                                                  support, density, i,
                                                  ["left", "right"][j])

                        # The box and point interior plots are drawn for
                        # all data at the group level, so we just do that once
                        if j and any(self.plot_hues[0] == hue_level):
                            continue

                        # Get the whole vector for this group level
                        violin_data = remove_na(group_data)

                        # Draw box and whisker information
                        if self.inner.startswith("box"):
                            self.draw_box_lines(ax, violin_data, i)

                        # Draw point observations
                        elif self.inner.startswith("point"):
                            self.draw_points(ax, violin_data, i)

                    # Option 2b: we are drawing full nested violins
                    # -----------------------------------------------

                    else:
                        grid = np.ones(self.gridsize) * (i + offsets[j])
                        fill_func(support,
                                  grid - density * self.dwidth,
                                  grid + density * self.dwidth,
                                  **kws)

                        # Draw the interior representation
                        if self.inner is None:
                            continue

                        # Get a nan-free vector of datapoints
                        hue_mask = self.plot_hues[i] == hue_level
                        violin_data = remove_na(group_data[hue_mask])

                        # Draw box and whisker information
                        if self.inner.startswith("box"):
                            self.draw_box_lines(ax, violin_data, i + offsets[j])

                        # Draw quartile lines
                        elif self.inner.startswith("quart"):
                            self.draw_quartiles(ax, violin_data,
                                                support, density,
                                                i + offsets[j])

                        # Draw stick observations
                        elif self.inner.startswith("stick"):
                            self.draw_stick_lines(ax, violin_data,
                                                  support, density,
                                                  i + offsets[j])

                        # Draw point observations
                        elif self.inner.startswith("point"):
                            self.draw_points(ax, violin_data, i + offsets[j])

    def draw_single_observation(self, ax, at_group, at_quant, density):
        """Draw a line to mark a single observation."""
        d_width = density * self.dwidth
        if self.orient == "v":
            ax.plot([at_group - d_width, at_group + d_width],
                    [at_quant, at_quant],
                    color=self.gray,
                    linewidth=self.linewidth)
        else:
            ax.plot([at_quant, at_quant],
                    [at_group - d_width, at_group + d_width],
                    color=self.gray,
                    linewidth=self.linewidth)

    def draw_box_lines(self, ax, data, center):
        """Draw boxplot information at center of the density."""
        # Compute the boxplot statistics
        q25, q50, q75 = np.percentile(data, [25, 50, 75])
        whisker_lim = 1.5 * (q75 - q25)
        h1 = np.min(data[data >= (q25 - whisker_lim)])
        h2 = np.max(data[data <= (q75 + whisker_lim)])

        # Draw a boxplot using lines and a point
        if self.orient == "v":
            ax.plot([center, center], [h1, h2],
                    linewidth=self.linewidth,
                    color=self.gray)
            ax.plot([center, center], [q25, q75],
                    linewidth=self.linewidth * 3,
                    color=self.gray)
            ax.scatter(center, q50,
                       zorder=3,
                       color="white",
                       edgecolor=self.gray,
                       s=np.square(self.linewidth * 2))
        else:
            ax.plot([h1, h2], [center, center],
                    linewidth=self.linewidth,
                    color=self.gray)
            ax.plot([q25, q75], [center, center],
                    linewidth=self.linewidth * 3,
                    color=self.gray)
            ax.scatter(q50, center,
                       zorder=3,
                       color="white",
                       edgecolor=self.gray,
                       s=np.square(self.linewidth * 2))

    def draw_quartiles(self, ax, data, support, density, center, split=False):
        """Draw the quartiles as lines at width of density."""
        q25, q50, q75 = np.percentile(data, [25, 50, 75])

        self.draw_to_density(ax, center, q25, support, density, split,
                             linewidth=self.linewidth,
                             dashes=[self.linewidth * 1.5] * 2)
        self.draw_to_density(ax, center, q50, support, density, split,
                             linewidth=self.linewidth,
                             dashes=[self.linewidth * 3] * 2)
        self.draw_to_density(ax, center, q75, support, density, split,
                             linewidth=self.linewidth,
                             dashes=[self.linewidth * 1.5] * 2)

    def draw_points(self, ax, data, center):
        """Draw individual observations as points at middle of the violin."""
        kws = dict(s=np.square(self.linewidth * 2),
                   color=self.gray,
                   edgecolor=self.gray)

        grid = np.ones(len(data)) * center

        if self.orient == "v":
            ax.scatter(grid, data, **kws)
        else:
            ax.scatter(data, grid, **kws)

    def draw_stick_lines(self, ax, data, support, density,
                         center, split=False):
        """Draw individual observations as sticks at width of density."""
        for val in data:
            self.draw_to_density(ax, center, val, support, density, split,
                                 linewidth=self.linewidth * .5)

    def draw_to_density(self, ax, center, val, support, density, split, **kws):
        """Draw a line orthogonal to the value axis at width of density."""
        idx = np.argmin(np.abs(support - val))
        width = self.dwidth * density[idx] * .99

        kws["color"] = self.gray

        if self.orient == "v":
            if split == "left":
                ax.plot([center - width, center], [val, val], **kws)
            elif split == "right":
                ax.plot([center, center + width], [val, val], **kws)
            else:
                ax.plot([center - width, center + width], [val, val], **kws)
        else:
            if split == "left":
                ax.plot([val, val], [center - width, center], **kws)
            elif split == "right":
                ax.plot([val, val], [center, center + width], **kws)
            else:
                ax.plot([val, val], [center - width, center + width], **kws)

    def plot(self, ax):
        """Make the violin plot."""
        self.draw_violins(ax)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
def violinplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
    width=.8, inner="box", split=False, dodge=True, orient=None,
    linewidth=None, color=None, palette=None, saturation=.75,
    ax=None, **kwargs,
):
    """Draw a violin plot of the data onto *ax* (or the current Axes).

    Thin wrapper that builds a `_ViolinPlotter` and returns the Axes
    the plot was drawn on.
    """
    drawer = _ViolinPlotter(x, y, hue, data, order, hue_order,
                            bw, cut, scale, scale_hue, gridsize,
                            width, inner, split, dodge, orient, linewidth,
                            color, palette, saturation)

    target = plt.gca() if ax is None else ax
    drawer.plot(target)
    return target
171,779 | from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _LVPlotter(_CategoricalPlotter):
    # NOTE(review): only the signatures of this class survive in this
    # extraction -- every method body has been elided, so the class is not
    # syntactically valid as-is. Restore the implementations (letter-value
    # / boxen plotter from seaborn.categorical) before using it.
    def __init__(self, x, y, hue, data, order, hue_order,
                 orient, color, palette, saturation,
                 width, dodge, k_depth, linewidth, scale, outlier_prop,
                 trust_alpha, showfliers=True):

    def _lv_box_ends(self, vals):

    def _lv_outliers(self, vals, k):

    def _width_functions(self, width_func):

    # NOTE(review): mutable default for `color` below -- safe only if the
    # implementation never mutates it in place.
    def _lvplot(self, box_data, positions,
                color=[255. / 256., 185. / 256., 0.],
                widths=1, ax=None, box_kws=None,
                flier_kws=None,
                line_kws=None):

        def height(b):

        def vert_perc_box(x, b, i, k, w):

        def horz_perc_box(x, b, i, k, w):

    def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,
                               line_kws=None):

    def plot(self, ax, box_kws, flier_kws, line_kws):
def boxenplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    orient=None, color=None, palette=None, saturation=.75,
    width=.8, dodge=True, k_depth='tukey', linewidth=None,
    scale='exponential', outlier_prop=0.007, trust_alpha=0.05,
    showfliers=True,
    ax=None, box_kws=None, flier_kws=None, line_kws=None,
):
    """Draw a letter-value (boxen) plot onto *ax* (or the current Axes).

    Thin wrapper that builds a `_LVPlotter`, forwards the per-artist
    keyword dictionaries, and returns the Axes the plot was drawn on.
    """
    drawer = _LVPlotter(x, y, hue, data, order, hue_order,
                        orient, color, palette, saturation,
                        width, dodge, k_depth, linewidth, scale,
                        outlier_prop, trust_alpha, showfliers)

    target = plt.gca() if ax is None else ax
    drawer.plot(target, box_kws, flier_kws, line_kws)
    return target
171,780 | from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _CategoricalPlotterNew(_RelationalPlotter):
    """Variable assignment, orientation handling, and drawing logic shared by
    the refactored (post-2021) categorical plotting functions."""

    semantics = "x", "y", "hue", "units"

    wide_structure = {"x": "@columns", "y": "@values", "hue": "@columns"}
    # flat_structure = {"x": "@values", "y": "@values"}
    flat_structure = {"y": "@values"}

    _legend_func = "scatter"
    _legend_attributes = ["color"]

    def __init__(
        self,
        data=None,
        variables=None,
        order=None,
        orient=None,
        require_numeric=False,
        legend="auto",
    ):
        # Fixed: avoid a mutable default argument (`variables={}`); normalize
        # None to a fresh dict here instead.
        variables = {} if variables is None else variables

        super().__init__(data=data, variables=variables)

        # This method takes care of some bookkeeping that is necessary because the
        # original categorical plots (prior to the 2021 refactor) had some rules that
        # don't fit exactly into the logic of _core. It may be wise to have a second
        # round of refactoring that moves the logic deeper, but this will keep things
        # relatively sensible for now.

        # For wide data, orient determines assignment to x/y differently from the
        # wide_structure rules in _core. If we do decide to make orient part of the
        # _core variable assignment, we'll want to figure out how to express that.
        if self.input_format == "wide" and orient == "h":
            self.plot_data = self.plot_data.rename(columns={"x": "y", "y": "x"})
            orig_variables = set(self.variables)
            orig_x = self.variables.pop("x", None)
            orig_y = self.variables.pop("y", None)
            orig_x_type = self.var_types.pop("x", None)
            orig_y_type = self.var_types.pop("y", None)
            if "x" in orig_variables:
                self.variables["y"] = orig_x
                self.var_types["y"] = orig_x_type
            if "y" in orig_variables:
                self.variables["x"] = orig_y
                self.var_types["x"] = orig_y_type

        # The concept of an "orientation" is important to the original categorical
        # plots, but there's no provision for it in _core, so we need to do it here.
        # Note that it could be useful for the other functions in at least two ways
        # (orienting a univariate distribution plot from long-form data and selecting
        # the aggregation axis in lineplot), so we may want to eventually refactor it.
        self.orient = infer_orient(
            x=self.plot_data.get("x", None),
            y=self.plot_data.get("y", None),
            orient=orient,
            require_numeric=require_numeric,
        )

        self.legend = legend

        # Short-circuit in the case of an empty plot
        if not self.has_xy_data:
            return

        # Categorical plots can be "univariate" in which case they get an anonymous
        # category label on the opposite axis. Note: this duplicates code in the core
        # scale_categorical function. We need to do it here because of the next line.
        if self.cat_axis not in self.variables:
            self.variables[self.cat_axis] = None
            self.var_types[self.cat_axis] = "categorical"
            self.plot_data[self.cat_axis] = ""

        # Categorical variables have discrete levels that we need to track
        cat_levels = categorical_order(self.plot_data[self.cat_axis], order)
        self.var_levels[self.cat_axis] = cat_levels

    def _hue_backcompat(self, color, palette, hue_order, force_hue=False):
        """Implement backwards compatibility for hue parametrization.

        Note: the force_hue parameter is used so that functions can be shown to
        pass existing tests during refactoring and then tested for new behavior.
        It can be removed after completion of the work.
        """
        # The original categorical functions applied a palette to the categorical axis
        # by default. We want to require an explicit hue mapping, to be more consistent
        # with how things work elsewhere now. I don't think there's any good way to
        # do this gently -- because it's triggered by the default value of hue=None,
        # users would always get a warning, unless we introduce some sentinel "default"
        # argument for this change. That's possible, but asking users to set `hue=None`
        # on every call is annoying.
        # We are keeping the logic for implementing the old behavior in with the current
        # system so that (a) we can punt on that decision and (b) we can ensure that
        # refactored code passes old tests.
        default_behavior = color is None or palette is not None
        if force_hue and "hue" not in self.variables and default_behavior:
            self._redundant_hue = True
            self.plot_data["hue"] = self.plot_data[self.cat_axis]
            self.variables["hue"] = self.variables[self.cat_axis]
            self.var_types["hue"] = "categorical"
            hue_order = self.var_levels[self.cat_axis]

            # Because we convert the categorical axis variable to string,
            # we need to update a dictionary palette too
            if isinstance(palette, dict):
                palette = {str(k): v for k, v in palette.items()}

        else:
            self._redundant_hue = False

        # Previously, categorical plots had a trick where color= could seed the palette.
        # Because that's an explicit parameterization, we are going to give it one
        # release cycle with a warning before removing.
        if "hue" in self.variables and palette is None and color is not None:
            if not isinstance(color, str):
                color = mpl.colors.to_hex(color)
            palette = f"dark:{color}"
            msg = (
                "Setting a gradient palette using color= is deprecated and will be "
                f"removed in version 0.13. Set `palette='{palette}'` for same effect."
            )
            warnings.warn(msg, FutureWarning)

        return palette, hue_order

    def _palette_without_hue_backcompat(self, palette, hue_order):
        """Provide one cycle where palette= implies hue= when not provided"""
        if "hue" not in self.variables and palette is not None:
            msg = "Passing `palette` without assigning `hue` is deprecated."
            warnings.warn(msg, FutureWarning, stacklevel=3)
            self.legend = False
            self.plot_data["hue"] = self.plot_data[self.cat_axis]
            self.variables["hue"] = self.variables.get(self.cat_axis)
            self.var_types["hue"] = self.var_types.get(self.cat_axis)
            hue_order = self.var_levels.get(self.cat_axis)
        return hue_order

    @property
    def cat_axis(self):
        """Return "x" or "y": the plot-data axis holding the categorical variable."""
        # Fixed: restored the @property decorator. This name is read as an
        # attribute throughout the class (e.g. `self.plot_data[self.cat_axis]`,
        # `self.var_types[self.cat_axis]`); as a plain method those lookups
        # would key on the bound-method object instead of "x"/"y".
        return {"v": "x", "h": "y"}[self.orient]

    def _get_gray(self, colors):
        """Get a grayscale value that looks good with color."""
        if not len(colors):
            return None
        unique_colors = np.unique(colors, axis=0)
        light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]
        # Darken relative to the lightest color so the gray stays visible.
        lum = min(light_vals) * .6
        return (lum, lum, lum)

    def _adjust_cat_axis(self, ax, axis):
        """Set ticks and limits for a categorical variable."""
        # Note: in theory, this could happen in _attach for all categorical axes
        # But two reasons not to do that:
        #  - If it happens before plotting, autoscaling messes up the plot limits
        #  - It would change existing plots from other seaborn functions
        if self.var_types[axis] != "categorical":
            return

        # If both x/y data are empty, the correct way to set up the plot is
        # somewhat undefined; because we don't add null category data to the plot in
        # this case we don't *have* a categorical axis (yet), so best to just bail.
        if self.plot_data[axis].empty:
            return

        # We can infer the total number of categories (including those from previous
        # plots that are not part of the plot we are currently making) from the number
        # of ticks, which matplotlib sets up while doing unit conversion. This feels
        # slightly risky, as if we are relying on something that may be a matplotlib
        # implementation detail. But I cannot think of a better way to keep track of
        # the state from previous categorical calls (see GH2516 for context)
        n = len(getattr(ax, f"get_{axis}ticks")())

        if axis == "x":
            ax.xaxis.grid(False)
            ax.set_xlim(-.5, n - .5, auto=None)
        else:
            ax.yaxis.grid(False)
            # Note limits that correspond to previously-inverted y axis
            ax.set_ylim(n - .5, -.5, auto=None)

    @property
    def _native_width(self):
        """Return unit of width separating categories on native numeric scale."""
        # Fixed: restored the @property decorator. Consumed as an attribute
        # (e.g. `.8 * self._native_width` in plot_strips/plot_swarms); as a
        # plain method that expression would be a TypeError.
        unique_values = np.unique(self.comp_data[self.cat_axis])
        if len(unique_values) > 1:
            native_width = np.nanmin(np.diff(unique_values))
        else:
            native_width = 1
        return native_width

    def _nested_offsets(self, width, dodge):
        """Return offsets for each hue level for dodged plots."""
        offsets = None
        if "hue" in self.variables and self._hue_map.levels is not None:
            n_levels = len(self._hue_map.levels)
            if dodge:
                each_width = width / n_levels
                offsets = np.linspace(0, width - each_width, n_levels)
                offsets -= offsets.mean()
            else:
                offsets = np.zeros(n_levels)
        return offsets

    # Note that the plotting methods here aim (in most cases) to produce the
    # exact same artists as the original (pre 0.12) version of the code, so
    # there is some weirdness that might not otherwise be clean or make sense in
    # this context, such as adding empty artists for combinations of variables
    # with no observations

    def plot_strips(
        self,
        jitter,
        dodge,
        color,
        edgecolor,
        plot_kws,
    ):
        """Draw strip (jittered scatter) artists onto the attached axes."""
        width = .8 * self._native_width
        offsets = self._nested_offsets(width, dodge)

        if jitter is True:
            jlim = 0.1
        else:
            jlim = float(jitter)
        if "hue" in self.variables and dodge and self._hue_map.levels is not None:
            jlim /= len(self._hue_map.levels)
        jlim *= self._native_width
        jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)

        iter_vars = [self.cat_axis]
        if dodge:
            iter_vars.append("hue")

        ax = self.ax
        dodge_move = jitter_move = 0

        for sub_vars, sub_data in self.iter_data(iter_vars,
                                                 from_comp_data=True,
                                                 allow_empty=True):

            if offsets is not None and (offsets != 0).any():
                dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]

            jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0

            adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move
            sub_data[self.cat_axis] = adjusted_data

            for var in "xy":
                if self._log_scaled(var):
                    sub_data[var] = np.power(10, sub_data[var])

            ax = self._get_axes(sub_vars)
            points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)

            if "hue" in self.variables:
                points.set_facecolors(self._hue_map(sub_data["hue"]))

            if edgecolor == "gray":  # XXX TODO change to "auto"
                points.set_edgecolors(self._get_gray(points.get_facecolors()))
            else:
                points.set_edgecolors(edgecolor)

        # Finalize the axes details
        if self.legend == "auto":
            show_legend = not self._redundant_hue and self.input_format != "wide"
        else:
            show_legend = bool(self.legend)

        if show_legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                ax.legend(title=self.legend_title)

    def plot_swarms(
        self,
        dodge,
        color,
        edgecolor,
        warn_thresh,
        plot_kws,
    ):
        """Draw swarm (beeswarm-adjusted scatter) artists onto the attached axes."""
        width = .8 * self._native_width
        offsets = self._nested_offsets(width, dodge)

        iter_vars = [self.cat_axis]
        if dodge:
            iter_vars.append("hue")

        ax = self.ax
        point_collections = {}
        dodge_move = 0

        for sub_vars, sub_data in self.iter_data(iter_vars,
                                                 from_comp_data=True,
                                                 allow_empty=True):

            if offsets is not None:
                dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]

            if not sub_data.empty:
                sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move

            for var in "xy":
                if self._log_scaled(var):
                    sub_data[var] = np.power(10, sub_data[var])

            ax = self._get_axes(sub_vars)
            points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)

            if "hue" in self.variables:
                points.set_facecolors(self._hue_map(sub_data["hue"]))

            if edgecolor == "gray":  # XXX TODO change to "auto"
                points.set_edgecolors(self._get_gray(points.get_facecolors()))
            else:
                points.set_edgecolors(edgecolor)

            if not sub_data.empty:
                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points

        beeswarm = Beeswarm(
            width=width, orient=self.orient, warn_thresh=warn_thresh,
        )
        for (ax, center), points in point_collections.items():
            if points.get_offsets().shape[0] > 1:

                # The swarm adjustment needs rendered point sizes, so it is
                # deferred to draw time by patching the collection's draw.
                def draw(points, renderer, *, center=center):

                    beeswarm(points, center)

                    if self.orient == "h":
                        scalex = False
                        scaley = ax.get_autoscaley_on()
                    else:
                        scalex = ax.get_autoscalex_on()
                        scaley = False

                    # This prevents us from undoing the nice categorical axis limits
                    # set in _adjust_cat_axis, because that method currently leave
                    # the autoscale flag in its original setting. It may be better
                    # to disable autoscaling there to avoid needing to do this.
                    fixed_scale = self.var_types[self.cat_axis] == "categorical"
                    ax.update_datalim(points.get_datalim(ax.transData))
                    if not fixed_scale and (scalex or scaley):
                        ax.autoscale_view(scalex=scalex, scaley=scaley)

                    super(points.__class__, points).draw(renderer)

                points.draw = draw.__get__(points)

        _draw_figure(ax.figure)

        # Finalize the axes details
        if self.legend == "auto":
            show_legend = not self._redundant_hue and self.input_format != "wide"
        else:
            show_legend = bool(self.legend)

        if show_legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                ax.legend(title=self.legend_title)
def _default_color(method, hue, color, kws):
"""If needed, get a default color by using the matplotlib property cycle."""
if hue is not None:
# This warning is probably user-friendly, but it's currently triggered
# in a FacetGrid context and I don't want to mess with that logic right now
# if color is not None:
# msg = "`color` is ignored when `hue` is assigned."
# warnings.warn(msg)
return None
kws = kws.copy()
kws.pop("label", None)
if color is not None:
return color
elif method.__name__ == "plot":
color = _normalize_kwargs(kws, mpl.lines.Line2D).get("color")
scout, = method([], [], scalex=False, scaley=False, color=color)
color = scout.get_color()
scout.remove()
elif method.__name__ == "scatter":
# Matplotlib will raise if the size of x/y don't match s/c,
# and the latter might be in the kws dict
scout_size = max(
np.atleast_1d(kws.get(key, [])).shape[0]
for key in ["s", "c", "fc", "facecolor", "facecolors"]
)
scout_x = scout_y = np.full(scout_size, np.nan)
scout = method(scout_x, scout_y, **kws)
facecolors = scout.get_facecolors()
if not len(facecolors):
# Handle bug in matplotlib <= 3.2 (I think)
# This will limit the ability to use non color= kwargs to specify
# a color in versions of matplotlib with the bug, but trying to
# work out what the user wanted by re-implementing the broken logic
# of inspecting the kwargs is probably too brittle.
single_color = False
else:
single_color = np.unique(facecolors, axis=0).shape[0] == 1
# Allow the user to specify an array of colors through various kwargs
if "c" not in kws and single_color:
color = to_rgb(facecolors[0])
scout.remove()
elif method.__name__ == "bar":
# bar() needs masked, not empty data, to generate a patch
scout, = method([np.nan], [np.nan], **kws)
color = to_rgb(scout.get_facecolor())
scout.remove()
elif method.__name__ == "fill_between":
# There is a bug on matplotlib < 3.3 where fill_between with
# datetime units and empty data will set incorrect autoscale limits
# To workaround it, we'll always return the first color in the cycle.
# https://github.com/matplotlib/matplotlib/issues/17586
ax = method.__self__
datetime_axis = any([
isinstance(ax.xaxis.converter, mpl.dates.DateConverter),
isinstance(ax.yaxis.converter, mpl.dates.DateConverter),
])
if Version(mpl.__version__) < Version("3.3") and datetime_axis:
return "C0"
kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)
scout = method([], [], **kws)
facecolor = scout.get_facecolor()
color = to_rgb(facecolor[0])
scout.remove()
return color
def stripplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    jitter=True, dodge=False, orient=None, color=None, palette=None,
    size=5, edgecolor="gray", linewidth=0,
    hue_norm=None, native_scale=False, formatter=None, legend="auto",
    ax=None, **kwargs
):
    """Draw a categorical scatterplot with points jittered along the
    categorical axis.

    Returns the :class:`matplotlib.axes.Axes` the plot was drawn onto.
    """
    p = _CategoricalPlotterNew(
        data=data,
        variables=_CategoricalPlotterNew.get_semantics(locals()),
        order=order,
        orient=orient,
        require_numeric=False,
        legend=legend,
    )

    if ax is None:
        ax = plt.gca()

    if p.var_types.get(p.cat_axis) == "categorical" or not native_scale:
        p.scale_categorical(p.cat_axis, order=order, formatter=formatter)

    p._attach(ax)

    # Fixed: short-circuit on empty data, mirroring swarmplot; plot_strips
    # assumes the x/y columns exist on the plot data.
    if not p.has_xy_data:
        return ax

    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
    palette, hue_order = p._hue_backcompat(color, palette, hue_order)

    color = _default_color(ax.scatter, hue, color, kwargs)

    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

    # XXX Copying possibly bad default decisions from original code for now
    kwargs.setdefault("zorder", 3)
    size = kwargs.get("s", size)

    kwargs.update(dict(
        s=size ** 2,
        edgecolor=edgecolor,
        linewidth=linewidth)
    )

    p.plot_strips(
        jitter=jitter,
        dodge=dodge,
        color=color,
        edgecolor=edgecolor,
        plot_kws=kwargs,
    )

    # XXX this happens inside a plotting method in the distribution plots
    # but maybe it's better out here? Alternatively, we have an open issue
    # suggesting that _attach could add default axes labels, which seems smart.
    p._add_axis_labels(ax)
    p._adjust_cat_axis(ax, axis=p.cat_axis)

    # Fixed: removed the stray ` | null` extraction artifact that followed
    # `return ax` and would raise NameError at runtime.
    return ax
from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _CategoricalPlotterNew(_RelationalPlotter):
    """Variable assignment, orientation handling, and drawing logic shared by
    the refactored (post-2021) categorical plotting functions."""

    semantics = "x", "y", "hue", "units"

    wide_structure = {"x": "@columns", "y": "@values", "hue": "@columns"}
    # flat_structure = {"x": "@values", "y": "@values"}
    flat_structure = {"y": "@values"}

    _legend_func = "scatter"
    _legend_attributes = ["color"]

    def __init__(
        self,
        data=None,
        variables=None,
        order=None,
        orient=None,
        require_numeric=False,
        legend="auto",
    ):
        # Fixed: avoid a mutable default argument (`variables={}`); normalize
        # None to a fresh dict here instead.
        variables = {} if variables is None else variables

        super().__init__(data=data, variables=variables)

        # This method takes care of some bookkeeping that is necessary because the
        # original categorical plots (prior to the 2021 refactor) had some rules that
        # don't fit exactly into the logic of _core. It may be wise to have a second
        # round of refactoring that moves the logic deeper, but this will keep things
        # relatively sensible for now.

        # For wide data, orient determines assignment to x/y differently from the
        # wide_structure rules in _core. If we do decide to make orient part of the
        # _core variable assignment, we'll want to figure out how to express that.
        if self.input_format == "wide" and orient == "h":
            self.plot_data = self.plot_data.rename(columns={"x": "y", "y": "x"})
            orig_variables = set(self.variables)
            orig_x = self.variables.pop("x", None)
            orig_y = self.variables.pop("y", None)
            orig_x_type = self.var_types.pop("x", None)
            orig_y_type = self.var_types.pop("y", None)
            if "x" in orig_variables:
                self.variables["y"] = orig_x
                self.var_types["y"] = orig_x_type
            if "y" in orig_variables:
                self.variables["x"] = orig_y
                self.var_types["x"] = orig_y_type

        # The concept of an "orientation" is important to the original categorical
        # plots, but there's no provision for it in _core, so we need to do it here.
        # Note that it could be useful for the other functions in at least two ways
        # (orienting a univariate distribution plot from long-form data and selecting
        # the aggregation axis in lineplot), so we may want to eventually refactor it.
        self.orient = infer_orient(
            x=self.plot_data.get("x", None),
            y=self.plot_data.get("y", None),
            orient=orient,
            require_numeric=require_numeric,
        )

        self.legend = legend

        # Short-circuit in the case of an empty plot
        if not self.has_xy_data:
            return

        # Categorical plots can be "univariate" in which case they get an anonymous
        # category label on the opposite axis. Note: this duplicates code in the core
        # scale_categorical function. We need to do it here because of the next line.
        if self.cat_axis not in self.variables:
            self.variables[self.cat_axis] = None
            self.var_types[self.cat_axis] = "categorical"
            self.plot_data[self.cat_axis] = ""

        # Categorical variables have discrete levels that we need to track
        cat_levels = categorical_order(self.plot_data[self.cat_axis], order)
        self.var_levels[self.cat_axis] = cat_levels

    def _hue_backcompat(self, color, palette, hue_order, force_hue=False):
        """Implement backwards compatibility for hue parametrization.

        Note: the force_hue parameter is used so that functions can be shown to
        pass existing tests during refactoring and then tested for new behavior.
        It can be removed after completion of the work.
        """
        # The original categorical functions applied a palette to the categorical axis
        # by default. We want to require an explicit hue mapping, to be more consistent
        # with how things work elsewhere now. I don't think there's any good way to
        # do this gently -- because it's triggered by the default value of hue=None,
        # users would always get a warning, unless we introduce some sentinel "default"
        # argument for this change. That's possible, but asking users to set `hue=None`
        # on every call is annoying.
        # We are keeping the logic for implementing the old behavior in with the current
        # system so that (a) we can punt on that decision and (b) we can ensure that
        # refactored code passes old tests.
        default_behavior = color is None or palette is not None
        if force_hue and "hue" not in self.variables and default_behavior:
            self._redundant_hue = True
            self.plot_data["hue"] = self.plot_data[self.cat_axis]
            self.variables["hue"] = self.variables[self.cat_axis]
            self.var_types["hue"] = "categorical"
            hue_order = self.var_levels[self.cat_axis]

            # Because we convert the categorical axis variable to string,
            # we need to update a dictionary palette too
            if isinstance(palette, dict):
                palette = {str(k): v for k, v in palette.items()}

        else:
            self._redundant_hue = False

        # Previously, categorical plots had a trick where color= could seed the palette.
        # Because that's an explicit parameterization, we are going to give it one
        # release cycle with a warning before removing.
        if "hue" in self.variables and palette is None and color is not None:
            if not isinstance(color, str):
                color = mpl.colors.to_hex(color)
            palette = f"dark:{color}"
            msg = (
                "Setting a gradient palette using color= is deprecated and will be "
                f"removed in version 0.13. Set `palette='{palette}'` for same effect."
            )
            warnings.warn(msg, FutureWarning)

        return palette, hue_order

    def _palette_without_hue_backcompat(self, palette, hue_order):
        """Provide one cycle where palette= implies hue= when not provided"""
        if "hue" not in self.variables and palette is not None:
            msg = "Passing `palette` without assigning `hue` is deprecated."
            warnings.warn(msg, FutureWarning, stacklevel=3)
            self.legend = False
            self.plot_data["hue"] = self.plot_data[self.cat_axis]
            self.variables["hue"] = self.variables.get(self.cat_axis)
            self.var_types["hue"] = self.var_types.get(self.cat_axis)
            hue_order = self.var_levels.get(self.cat_axis)
        return hue_order

    @property
    def cat_axis(self):
        """Return "x" or "y": the plot-data axis holding the categorical variable."""
        # Fixed: restored the @property decorator. This name is read as an
        # attribute throughout the class (e.g. `self.plot_data[self.cat_axis]`,
        # `self.var_types[self.cat_axis]`); as a plain method those lookups
        # would key on the bound-method object instead of "x"/"y".
        return {"v": "x", "h": "y"}[self.orient]

    def _get_gray(self, colors):
        """Get a grayscale value that looks good with color."""
        if not len(colors):
            return None
        unique_colors = np.unique(colors, axis=0)
        light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]
        # Darken relative to the lightest color so the gray stays visible.
        lum = min(light_vals) * .6
        return (lum, lum, lum)

    def _adjust_cat_axis(self, ax, axis):
        """Set ticks and limits for a categorical variable."""
        # Note: in theory, this could happen in _attach for all categorical axes
        # But two reasons not to do that:
        #  - If it happens before plotting, autoscaling messes up the plot limits
        #  - It would change existing plots from other seaborn functions
        if self.var_types[axis] != "categorical":
            return

        # If both x/y data are empty, the correct way to set up the plot is
        # somewhat undefined; because we don't add null category data to the plot in
        # this case we don't *have* a categorical axis (yet), so best to just bail.
        if self.plot_data[axis].empty:
            return

        # We can infer the total number of categories (including those from previous
        # plots that are not part of the plot we are currently making) from the number
        # of ticks, which matplotlib sets up while doing unit conversion. This feels
        # slightly risky, as if we are relying on something that may be a matplotlib
        # implementation detail. But I cannot think of a better way to keep track of
        # the state from previous categorical calls (see GH2516 for context)
        n = len(getattr(ax, f"get_{axis}ticks")())

        if axis == "x":
            ax.xaxis.grid(False)
            ax.set_xlim(-.5, n - .5, auto=None)
        else:
            ax.yaxis.grid(False)
            # Note limits that correspond to previously-inverted y axis
            ax.set_ylim(n - .5, -.5, auto=None)

    @property
    def _native_width(self):
        """Return unit of width separating categories on native numeric scale."""
        # Fixed: restored the @property decorator. Consumed as an attribute
        # (e.g. `.8 * self._native_width` in plot_strips/plot_swarms); as a
        # plain method that expression would be a TypeError.
        unique_values = np.unique(self.comp_data[self.cat_axis])
        if len(unique_values) > 1:
            native_width = np.nanmin(np.diff(unique_values))
        else:
            native_width = 1
        return native_width

    def _nested_offsets(self, width, dodge):
        """Return offsets for each hue level for dodged plots."""
        offsets = None
        if "hue" in self.variables and self._hue_map.levels is not None:
            n_levels = len(self._hue_map.levels)
            if dodge:
                each_width = width / n_levels
                offsets = np.linspace(0, width - each_width, n_levels)
                offsets -= offsets.mean()
            else:
                offsets = np.zeros(n_levels)
        return offsets

    # Note that the plotting methods here aim (in most cases) to produce the
    # exact same artists as the original (pre 0.12) version of the code, so
    # there is some weirdness that might not otherwise be clean or make sense in
    # this context, such as adding empty artists for combinations of variables
    # with no observations

    def plot_strips(
        self,
        jitter,
        dodge,
        color,
        edgecolor,
        plot_kws,
    ):
        """Draw strip (jittered scatter) artists onto the attached axes."""
        width = .8 * self._native_width
        offsets = self._nested_offsets(width, dodge)

        if jitter is True:
            jlim = 0.1
        else:
            jlim = float(jitter)
        if "hue" in self.variables and dodge and self._hue_map.levels is not None:
            jlim /= len(self._hue_map.levels)
        jlim *= self._native_width
        jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)

        iter_vars = [self.cat_axis]
        if dodge:
            iter_vars.append("hue")

        ax = self.ax
        dodge_move = jitter_move = 0

        for sub_vars, sub_data in self.iter_data(iter_vars,
                                                 from_comp_data=True,
                                                 allow_empty=True):

            if offsets is not None and (offsets != 0).any():
                dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]

            jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0

            adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move
            sub_data[self.cat_axis] = adjusted_data

            for var in "xy":
                if self._log_scaled(var):
                    sub_data[var] = np.power(10, sub_data[var])

            ax = self._get_axes(sub_vars)
            points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)

            if "hue" in self.variables:
                points.set_facecolors(self._hue_map(sub_data["hue"]))

            if edgecolor == "gray":  # XXX TODO change to "auto"
                points.set_edgecolors(self._get_gray(points.get_facecolors()))
            else:
                points.set_edgecolors(edgecolor)

        # Finalize the axes details
        if self.legend == "auto":
            show_legend = not self._redundant_hue and self.input_format != "wide"
        else:
            show_legend = bool(self.legend)

        if show_legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                ax.legend(title=self.legend_title)

    def plot_swarms(
        self,
        dodge,
        color,
        edgecolor,
        warn_thresh,
        plot_kws,
    ):
        """Draw swarm (beeswarm-adjusted scatter) artists onto the attached axes."""
        width = .8 * self._native_width
        offsets = self._nested_offsets(width, dodge)

        iter_vars = [self.cat_axis]
        if dodge:
            iter_vars.append("hue")

        ax = self.ax
        point_collections = {}
        dodge_move = 0

        for sub_vars, sub_data in self.iter_data(iter_vars,
                                                 from_comp_data=True,
                                                 allow_empty=True):

            if offsets is not None:
                dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]

            if not sub_data.empty:
                sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move

            for var in "xy":
                if self._log_scaled(var):
                    sub_data[var] = np.power(10, sub_data[var])

            ax = self._get_axes(sub_vars)
            points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)

            if "hue" in self.variables:
                points.set_facecolors(self._hue_map(sub_data["hue"]))

            if edgecolor == "gray":  # XXX TODO change to "auto"
                points.set_edgecolors(self._get_gray(points.get_facecolors()))
            else:
                points.set_edgecolors(edgecolor)

            if not sub_data.empty:
                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points

        beeswarm = Beeswarm(
            width=width, orient=self.orient, warn_thresh=warn_thresh,
        )
        for (ax, center), points in point_collections.items():
            if points.get_offsets().shape[0] > 1:

                # The swarm adjustment needs rendered point sizes, so it is
                # deferred to draw time by patching the collection's draw.
                def draw(points, renderer, *, center=center):

                    beeswarm(points, center)

                    if self.orient == "h":
                        scalex = False
                        scaley = ax.get_autoscaley_on()
                    else:
                        scalex = ax.get_autoscalex_on()
                        scaley = False

                    # This prevents us from undoing the nice categorical axis limits
                    # set in _adjust_cat_axis, because that method currently leave
                    # the autoscale flag in its original setting. It may be better
                    # to disable autoscaling there to avoid needing to do this.
                    fixed_scale = self.var_types[self.cat_axis] == "categorical"
                    ax.update_datalim(points.get_datalim(ax.transData))
                    if not fixed_scale and (scalex or scaley):
                        ax.autoscale_view(scalex=scalex, scaley=scaley)

                    super(points.__class__, points).draw(renderer)

                points.draw = draw.__get__(points)

        _draw_figure(ax.figure)

        # Finalize the axes details
        if self.legend == "auto":
            show_legend = not self._redundant_hue and self.input_format != "wide"
        else:
            show_legend = bool(self.legend)

        if show_legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                ax.legend(title=self.legend_title)
def _default_color(method, hue, color, kws):
"""If needed, get a default color by using the matplotlib property cycle."""
if hue is not None:
# This warning is probably user-friendly, but it's currently triggered
# in a FacetGrid context and I don't want to mess with that logic right now
# if color is not None:
# msg = "`color` is ignored when `hue` is assigned."
# warnings.warn(msg)
return None
kws = kws.copy()
kws.pop("label", None)
if color is not None:
return color
elif method.__name__ == "plot":
color = _normalize_kwargs(kws, mpl.lines.Line2D).get("color")
scout, = method([], [], scalex=False, scaley=False, color=color)
color = scout.get_color()
scout.remove()
elif method.__name__ == "scatter":
# Matplotlib will raise if the size of x/y don't match s/c,
# and the latter might be in the kws dict
scout_size = max(
np.atleast_1d(kws.get(key, [])).shape[0]
for key in ["s", "c", "fc", "facecolor", "facecolors"]
)
scout_x = scout_y = np.full(scout_size, np.nan)
scout = method(scout_x, scout_y, **kws)
facecolors = scout.get_facecolors()
if not len(facecolors):
# Handle bug in matplotlib <= 3.2 (I think)
# This will limit the ability to use non color= kwargs to specify
# a color in versions of matplotlib with the bug, but trying to
# work out what the user wanted by re-implementing the broken logic
# of inspecting the kwargs is probably too brittle.
single_color = False
else:
single_color = np.unique(facecolors, axis=0).shape[0] == 1
# Allow the user to specify an array of colors through various kwargs
if "c" not in kws and single_color:
color = to_rgb(facecolors[0])
scout.remove()
elif method.__name__ == "bar":
# bar() needs masked, not empty data, to generate a patch
scout, = method([np.nan], [np.nan], **kws)
color = to_rgb(scout.get_facecolor())
scout.remove()
elif method.__name__ == "fill_between":
# There is a bug on matplotlib < 3.3 where fill_between with
# datetime units and empty data will set incorrect autoscale limits
# To workaround it, we'll always return the first color in the cycle.
# https://github.com/matplotlib/matplotlib/issues/17586
ax = method.__self__
datetime_axis = any([
isinstance(ax.xaxis.converter, mpl.dates.DateConverter),
isinstance(ax.yaxis.converter, mpl.dates.DateConverter),
])
if Version(mpl.__version__) < Version("3.3") and datetime_axis:
return "C0"
kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)
scout = method([], [], **kws)
facecolor = scout.get_facecolor()
color = to_rgb(facecolor[0])
scout.remove()
return color
def swarmplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    dodge=False, orient=None, color=None, palette=None,
    size=5, edgecolor="gray", linewidth=0, hue_norm=None,
    native_scale=False, formatter=None, legend="auto", warn_thresh=.05,
    ax=None, **kwargs
):
    """Draw a categorical scatterplot with points adjusted to be
    non-overlapping ("beeswarm").

    Returns the :class:`matplotlib.axes.Axes` the plot was drawn onto.
    """
    p = _CategoricalPlotterNew(
        data=data,
        variables=_CategoricalPlotterNew.get_semantics(locals()),
        order=order,
        orient=orient,
        require_numeric=False,
        legend=legend,
    )

    if ax is None:
        ax = plt.gca()

    if p.var_types.get(p.cat_axis) == "categorical" or not native_scale:
        p.scale_categorical(p.cat_axis, order=order, formatter=formatter)

    p._attach(ax)

    # Short-circuit on empty data; the plotting method assumes the x/y
    # columns exist on the plot data.
    if not p.has_xy_data:
        return ax

    hue_order = p._palette_without_hue_backcompat(palette, hue_order)
    palette, hue_order = p._hue_backcompat(color, palette, hue_order)

    color = _default_color(ax.scatter, hue, color, kwargs)

    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

    # XXX Copying possibly bad default decisions from original code for now
    kwargs.setdefault("zorder", 3)
    size = kwargs.get("s", size)

    if linewidth is None:
        # Scale the point edge width with the marker size when not specified.
        linewidth = size / 10

    kwargs.update(dict(
        s=size ** 2,
        linewidth=linewidth,
    ))

    p.plot_swarms(
        dodge=dodge,
        color=color,
        edgecolor=edgecolor,
        warn_thresh=warn_thresh,
        plot_kws=kwargs,
    )

    p._add_axis_labels(ax)
    p._adjust_cat_axis(ax, axis=p.cat_axis)

    # Fixed: removed the stray ` | null` extraction artifact that followed
    # `return ax` and would raise NameError at runtime.
    return ax
from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _BarPlotter(_CategoricalStatPlotter):
    """Show point estimates and confidence intervals with bars."""

    def __init__(self, x, y, hue, data, order, hue_order,
                 estimator, errorbar, n_boot, units, seed,
                 orient, color, palette, saturation, width,
                 errcolor, errwidth, capsize, dodge):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient,
                                 order, hue_order, units)
        self.establish_colors(color, palette, saturation)
        self.estimate_statistic(estimator, errorbar, n_boot, seed)

        self.dodge = dodge
        self.width = width

        # Appearance of the error-bar artists
        self.errcolor = errcolor
        self.errwidth = errwidth
        self.capsize = capsize

    def draw_bars(self, ax, kws):
        """Draw the bars onto `ax`."""
        # Get the right matplotlib function depending on the orientation
        barfunc = ax.bar if self.orient == "v" else ax.barh
        barpos = np.arange(len(self.statistic))

        if self.plot_hues is None:

            # Draw the bars
            barfunc(barpos, self.statistic, self.width,
                    color=self.colors, align="center", **kws)

            # Draw the confidence intervals
            errcolors = [self.errcolor] * len(barpos)
            self.draw_confints(ax,
                               barpos,
                               self.confint,
                               errcolors,
                               self.errwidth,
                               self.capsize)

        else:

            for j, hue_level in enumerate(self.hue_names):

                # Draw the bars, offset from the group center by hue level
                offpos = barpos + self.hue_offsets[j]
                barfunc(offpos, self.statistic[:, j], self.nested_width,
                        color=self.colors[j], align="center",
                        label=hue_level, **kws)

                # Draw the confidence intervals
                if self.confint.size:
                    confint = self.confint[:, j]
                    errcolors = [self.errcolor] * len(offpos)
                    self.draw_confints(ax,
                                       offpos,
                                       confint,
                                       errcolors,
                                       self.errwidth,
                                       self.capsize)

    def plot(self, ax, bar_kws):
        """Make the plot."""
        self.draw_bars(ax, bar_kws)
        self.annotate_axes(ax)
        if self.orient == "h":
            # Categorical axis runs downward for horizontal bars
            ax.invert_yaxis()
def barplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    estimator="mean", errorbar=("ci", 95), n_boot=1000, units=None, seed=None,
    orient=None, color=None, palette=None, saturation=.75, width=.8,
    errcolor=".26", errwidth=None, capsize=None, dodge=True, ci="deprecated",
    ax=None,
    **kwargs,
):
    """Show point estimates as rectangular bars with error bars.

    Returns the matplotlib Axes containing the plot.
    """
    # Translate the deprecated `ci` parameter into the `errorbar` API
    errorbar = utils._deprecate_ci(errorbar, ci)

    # Be backwards compatible with len passed directly, which
    # does not work in Series.agg (maybe a pandas bug?)
    if estimator is len:
        estimator = "size"

    bar_plotter = _BarPlotter(
        x, y, hue, data, order, hue_order,
        estimator, errorbar, n_boot, units, seed,
        orient, color, palette, saturation,
        width, errcolor, errwidth, capsize, dodge,
    )

    target_ax = plt.gca() if ax is None else ax
    bar_plotter.plot(target_ax, kwargs)
    return target_ax
from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _PointPlotter(_CategoricalStatPlotter):
    """Show point estimates and confidence intervals with (joined) points."""

    default_palette = "dark"

    def __init__(self, x, y, hue, data, order, hue_order,
                 estimator, errorbar, n_boot, units, seed,
                 markers, linestyles, dodge, join, scale,
                 orient, color, palette, errwidth, capsize, label):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient,
                                 order, hue_order, units)
        self.establish_colors(color, palette, 1)
        self.estimate_statistic(estimator, errorbar, n_boot, seed)

        # Override the default palette for single-color plots
        if hue is None and color is None and palette is None:
            self.colors = [color_palette()[0]] * len(self.colors)

        # Don't join single-layer plots with different colors
        if hue is None and palette is not None:
            join = False

        # Use a good default for `dodge=True`
        if dodge is True and self.hue_names is not None:
            dodge = .025 * len(self.hue_names)

        # Make sure we have a marker for each hue level
        if isinstance(markers, str):
            markers = [markers] * len(self.colors)
        self.markers = markers

        # Make sure we have a line style for each hue level
        if isinstance(linestyles, str):
            linestyles = [linestyles] * len(self.colors)
        self.linestyles = linestyles

        # Set the other plot components
        self.dodge = dodge
        self.join = join
        self.scale = scale
        self.errwidth = errwidth
        self.capsize = capsize
        self.label = label

    # Fixed: this must be a property -- `draw_points` reads it as an
    # attribute (`offsets = self.hue_offsets`) and indexes into the result,
    # which would raise TypeError on a bound method.
    @property
    def hue_offsets(self):
        """Offsets relative to the center position for each hue level."""
        if self.dodge:
            offset = np.linspace(0, self.dodge, len(self.hue_names))
            offset -= offset.mean()
        else:
            offset = np.zeros(len(self.hue_names))
        return offset

    def draw_points(self, ax):
        """Draw the main data components of the plot."""
        # Get the center positions on the categorical axis
        pointpos = np.arange(len(self.statistic))

        # Get the size of the plot elements
        lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
        mew = lw * .75
        markersize = np.pi * np.square(lw) * 2

        if self.plot_hues is None:

            # Draw lines joining each estimate point
            if self.join:
                color = self.colors[0]
                ls = self.linestyles[0]
                if self.orient == "h":
                    ax.plot(self.statistic, pointpos,
                            color=color, ls=ls, lw=lw)
                else:
                    ax.plot(pointpos, self.statistic,
                            color=color, ls=ls, lw=lw)

            # Draw the confidence intervals
            self.draw_confints(ax, pointpos, self.confint, self.colors,
                               self.errwidth, self.capsize)

            # Draw the estimate points
            marker = self.markers[0]
            colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]
            if self.orient == "h":
                x, y = self.statistic, pointpos
            else:
                x, y = pointpos, self.statistic
            ax.scatter(x, y,
                       linewidth=mew, marker=marker, s=markersize,
                       facecolor=colors, edgecolor=colors, label=self.label)

        else:

            offsets = self.hue_offsets
            for j, hue_level in enumerate(self.hue_names):

                # Determine the values to plot for this level
                statistic = self.statistic[:, j]

                # Determine the position on the categorical and z axes
                offpos = pointpos + offsets[j]
                z = j + 1

                # Draw lines joining each estimate point
                if self.join:
                    color = self.colors[j]
                    ls = self.linestyles[j]
                    if self.orient == "h":
                        ax.plot(statistic, offpos, color=color,
                                zorder=z, ls=ls, lw=lw)
                    else:
                        ax.plot(offpos, statistic, color=color,
                                zorder=z, ls=ls, lw=lw)

                # Draw the confidence intervals
                if self.confint.size:
                    confint = self.confint[:, j]
                    errcolors = [self.colors[j]] * len(offpos)
                    self.draw_confints(ax, offpos, confint, errcolors,
                                       self.errwidth, self.capsize,
                                       zorder=z)

                # Draw the estimate points
                n_points = len(remove_na(offpos))
                marker = self.markers[j]
                color = mpl.colors.colorConverter.to_rgb(self.colors[j])

                if self.orient == "h":
                    x, y = statistic, offpos
                else:
                    x, y = offpos, statistic

                # Keep the legend entry alive even when all values are null
                if not len(remove_na(statistic)):
                    x = y = [np.nan] * n_points

                ax.scatter(x, y, label=hue_level,
                           facecolor=color, edgecolor=color,
                           linewidth=mew, marker=marker, s=markersize,
                           zorder=z)

    def plot(self, ax):
        """Make the plot."""
        self.draw_points(ax)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _CountPlotter(_BarPlotter):
    """Bar plotter whose statistic is the number of observations."""
    # Counting operates on categorical data, so the value axis need not be
    # numeric.  (The class body was lost in transit; as dumped, an empty
    # class body is a SyntaxError.)
    require_numeric = False


def countplot(
    data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,
    orient=None, color=None, palette=None, saturation=.75, width=.8,
    dodge=True, ax=None, **kwargs
):
    """Show the counts of observations in each categorical bin using bars.

    Returns the matplotlib Axes containing the plot.
    """
    # Counting needs no statistical machinery: fix the estimator and
    # disable bootstrapping / error-bar drawing entirely
    estimator = "size"
    errorbar = None
    n_boot = 0
    units = None
    seed = None
    errcolor = None
    errwidth = None
    capsize = None

    # Mirror the single provided variable onto both axes so the bars are
    # labeled by category and sized by count
    if x is None and y is not None:
        orient = "h"
        x = y
    elif y is None and x is not None:
        orient = "v"
        y = x
    elif x is not None and y is not None:
        raise ValueError("Cannot pass values for both `x` and `y`")

    plotter = _CountPlotter(
        x, y, hue, data, order, hue_order,
        estimator, errorbar, n_boot, units, seed,
        orient, color, palette, saturation,
        width, errcolor, errwidth, capsize, dodge
    )

    plotter.value_label = "count"

    if ax is None:
        ax = plt.gca()

    plotter.plot(ax, kwargs)
    return ax
from textwrap import dedent
from numbers import Number
import warnings
from colorsys import rgb_to_hls
from functools import partial
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.collections import PatchCollection
import matplotlib.patches as Patches
import matplotlib.pyplot as plt
from seaborn._oldcore import (
variable_type,
infer_orient,
categorical_order,
)
from seaborn.relational import _RelationalPlotter
from seaborn import utils
from seaborn.utils import remove_na, _normal_quantile_func, _draw_figure, _default_color
from seaborn._statistics import EstimateAggregator
from seaborn.palettes import color_palette, husl_palette, light_palette, dark_palette
from seaborn.axisgrid import FacetGrid, _facet_docs
class _CategoricalFacetPlotter(_CategoricalPlotterNew):
    """Categorical plotter that also recognizes `col`/`row` facet semantics."""
    semantics = _CategoricalPlotterNew.semantics + ("col", "row")
class _CategoricalPlotter:
width = .8
default_palette = "light"
require_numeric = True
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None,
units=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` and `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
# No statistical units with wide inputs
plot_units = None
# We also won't get a axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
if variable_type(data[col]) == "numeric":
order.append(col)
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.items()
plot_data = [np.asarray(s, float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range(len(plot_data)))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get variables from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
units = data.get(units, units)
# Validate the inputs
for var in [x, y, hue, units]:
if isinstance(var, str):
err = f"Could not interpret input '{var}'"
raise ValueError(err)
# Figure out the plotting orientation
orient = infer_orient(
x, y, orient, require_numeric=self.require_numeric
)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
plot_units = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Get the categorical axis label
group_label = None
if hasattr(groups, "name"):
group_label = groups.name
# Get the order on the categorical axis
group_names = categorical_order(groups, order)
# Group the numeric data
plot_data, value_label = self._group_longform(vals, groups,
group_names)
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Get the order of the hue levels
hue_names = categorical_order(hue, hue_order)
# Group the hue data
plot_hues, hue_title = self._group_longform(hue, groups,
group_names)
# Now handle the units for nested observations
if units is None:
plot_units = None
else:
plot_units, _ = self._group_longform(units, groups,
group_names)
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
self.plot_units = plot_units
def _group_longform(self, vals, grouper, order):
"""Group a long-form variable by another with correct order."""
# Ensure that the groupby will work
if not isinstance(vals, pd.Series):
if isinstance(grouper, pd.Series):
index = grouper.index
else:
index = None
vals = pd.Series(vals, index=index)
# Group the val data
grouped_vals = vals.groupby(grouper)
out_data = []
for g in order:
try:
g_vals = grouped_vals.get_group(g)
except KeyError:
g_vals = np.array([])
out_data.append(g_vals)
# Get the vals axis label
label = vals.name
return out_data, label
def establish_colors(self, color, palette, saturation):
"""Get a list of colors for the main component of the plots."""
if self.hue_names is None:
n_colors = len(self.plot_data)
else:
n_colors = len(self.hue_names)
# Determine the main colors
if color is None and palette is None:
# Determine whether the current palette will have enough values
# If not, we'll default to the husl palette so each is distinct
current_palette = utils.get_color_cycle()
if n_colors <= len(current_palette):
colors = color_palette(n_colors=n_colors)
else:
colors = husl_palette(n_colors, l=.7) # noqa
elif palette is None:
# When passing a specific color, the interpretation depends
# on whether there is a hue variable or not.
# If so, we will make a blend palette so that the different
# levels have some amount of variation.
if self.hue_names is None:
colors = [color] * n_colors
else:
if self.default_palette == "light":
colors = light_palette(color, n_colors)
elif self.default_palette == "dark":
colors = dark_palette(color, n_colors)
else:
raise RuntimeError("No default palette specified")
else:
# Let `palette` be a dict mapping level to color
if isinstance(palette, dict):
if self.hue_names is None:
levels = self.group_names
else:
levels = self.hue_names
palette = [palette[l] for l in levels]
colors = color_palette(palette, n_colors)
# Desaturate a bit because these are patches
if saturation < 1:
colors = color_palette(colors, desat=saturation)
# Convert the colors to a common representations
rgb_colors = color_palette(colors)
# Determine the gray color to use for the lines framing the plot
light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]
lum = min(light_vals) * .6
gray = mpl.colors.rgb2hex((lum, lum, lum))
# Assign object attributes
self.colors = rgb_colors
self.gray = gray
def hue_offsets(self):
"""A list of center positions for plots when hue nesting is used."""
n_levels = len(self.hue_names)
if self.dodge:
each_width = self.width / n_levels
offsets = np.linspace(0, self.width - each_width, n_levels)
offsets -= offsets.mean()
else:
offsets = np.zeros(n_levels)
return offsets
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
if self.dodge:
width = self.width / len(self.hue_names) * .98
else:
width = self.width
return width
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
group_names = self.group_names
if not group_names:
group_names = ["" for _ in range(len(self.plot_data))]
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)
if self.hue_names is not None:
ax.legend(loc="best", title=self.hue_title)
def add_legend_data(self, ax, color, label):
"""Add a dummy patch object so we can get legend data."""
rect = plt.Rectangle([0, 0], 0, 0,
linewidth=self.linewidth / 2,
edgecolor=self.gray,
facecolor=color,
label=label)
ax.add_patch(rect)
class _BoxPlotter(_CategoricalPlotter):
    """Show distributions with box-and-whisker plots drawn by matplotlib."""

    def __init__(self, x, y, hue, data, order, hue_order,
                 orient, color, palette, saturation,
                 width, dodge, fliersize, linewidth):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)

        self.dodge = dodge
        self.width = width
        self.fliersize = fliersize

        # Fall back on the matplotlib default line width
        if linewidth is None:
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth

    def draw_boxplot(self, ax, kws):
        """Use matplotlib to draw a boxplot on an Axes."""
        vert = self.orient == "v"

        # Pull per-artist style overrides (boxprops, whiskerprops, ...) out
        # of the keyword dict before passing the rest to ax.boxplot
        props = {}
        for obj in ["box", "whisker", "cap", "median", "flier"]:
            props[obj] = kws.pop(obj + "props", {})

        for i, group_data in enumerate(self.plot_data):

            if self.plot_hues is None:

                # Handle case where there is data at this level
                if group_data.size == 0:
                    continue

                # Draw a single box or a set of boxes
                # with a single level of grouping
                box_data = np.asarray(remove_na(group_data))

                # Handle case where there is no non-null data
                if box_data.size == 0:
                    continue

                artist_dict = ax.boxplot(box_data,
                                         vert=vert,
                                         patch_artist=True,
                                         positions=[i],
                                         widths=self.width,
                                         **kws)
                color = self.colors[i]
                self.restyle_boxplot(artist_dict, color, props)
            else:
                # Draw nested groups of boxes
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):

                    # Add a legend for this hue level
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)

                    # Handle case where there is data at this level
                    if group_data.size == 0:
                        continue

                    hue_mask = self.plot_hues[i] == hue_level
                    box_data = np.asarray(remove_na(group_data[hue_mask]))

                    # Handle case where there is no non-null data
                    if box_data.size == 0:
                        continue

                    center = i + offsets[j]
                    artist_dict = ax.boxplot(box_data,
                                             vert=vert,
                                             patch_artist=True,
                                             positions=[center],
                                             widths=self.nested_width,
                                             **kws)
                    self.restyle_boxplot(artist_dict, self.colors[j], props)
                    # Add legend data, but just for one set of boxes

    def restyle_boxplot(self, artist_dict, color, props):
        """Take a drawn matplotlib boxplot and make it look nice."""
        # Apply seaborn defaults first, then any user-specified overrides
        for box in artist_dict["boxes"]:
            box.update(dict(facecolor=color,
                            zorder=.9,
                            edgecolor=self.gray,
                            linewidth=self.linewidth))
            box.update(props["box"])
        for whisk in artist_dict["whiskers"]:
            whisk.update(dict(color=self.gray,
                              linewidth=self.linewidth,
                              linestyle="-"))
            whisk.update(props["whisker"])
        for cap in artist_dict["caps"]:
            cap.update(dict(color=self.gray,
                            linewidth=self.linewidth))
            cap.update(props["cap"])
        for med in artist_dict["medians"]:
            med.update(dict(color=self.gray,
                            linewidth=self.linewidth))
            med.update(props["median"])
        for fly in artist_dict["fliers"]:
            fly.update(dict(markerfacecolor=self.gray,
                            marker="d",
                            markeredgecolor=self.gray,
                            markersize=self.fliersize))
            fly.update(props["flier"])

    def plot(self, ax, boxplot_kws):
        """Make the plot."""
        self.draw_boxplot(ax, boxplot_kws)
        self.annotate_axes(ax)
        if self.orient == "h":
            # Flip so the first category appears at the top
            ax.invert_yaxis()
class _ViolinPlotter(_CategoricalPlotter):
    def __init__(self, x, y, hue, data, order, hue_order,
                 bw, cut, scale, scale_hue, gridsize,
                 width, inner, split, dodge, orient, linewidth,
                 color, palette, saturation):
        """Initialize the plotter and precompute each violin's KDE."""
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)
        self.estimate_densities(bw, cut, scale, scale_hue, gridsize)

        self.gridsize = gridsize
        self.width = width
        self.dodge = dodge

        # Validate the `inner` style specification early
        if inner is not None:
            if not any([inner.startswith("quart"),
                        inner.startswith("box"),
                        inner.startswith("stick"),
                        inner.startswith("point")]):
                err = f"Inner style '{inner}' not recognized"
                raise ValueError(err)
        self.inner = inner

        # Split violins only make sense with exactly two hue levels
        if split and self.hue_names is not None and len(self.hue_names) != 2:
            msg = "There must be exactly two hue levels to use `split`.'"
            raise ValueError(msg)
        self.split = split

        # Fall back on the matplotlib default line width
        if linewidth is None:
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth
    def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
        """Find the support and density for all of the data.

        Populates ``self.support`` and ``self.density``, either flat lists
        (no hue) or nested lists (one sublist per group, one entry per hue
        level).  Empty groups get an empty support and a density of [1.].
        """
        # Initialize data structures to keep track of plotting data
        if self.hue_names is None:
            support = []
            density = []
            counts = np.zeros(len(self.plot_data))
            max_density = np.zeros(len(self.plot_data))
        else:
            support = [[] for _ in self.plot_data]
            density = [[] for _ in self.plot_data]
            size = len(self.group_names), len(self.hue_names)
            counts = np.zeros(size)
            max_density = np.zeros(size)

        for i, group_data in enumerate(self.plot_data):

            # Option 1: we have a single level of grouping
            # --------------------------------------------
            if self.plot_hues is None:

                # Strip missing datapoints
                kde_data = remove_na(group_data)

                # Handle special case of no data at this level
                if kde_data.size == 0:
                    support.append(np.array([]))
                    density.append(np.array([1.]))
                    counts[i] = 0
                    max_density[i] = 0
                    continue

                # Handle special case of a single unique datapoint
                elif np.unique(kde_data).size == 1:
                    support.append(np.unique(kde_data))
                    density.append(np.array([1.]))
                    counts[i] = 1
                    max_density[i] = 0
                    continue

                # Fit the KDE and get the used bandwidth size
                kde, bw_used = self.fit_kde(kde_data, bw)

                # Determine the support grid and get the density over it
                support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
                density_i = kde.evaluate(support_i)

                # Update the data structures with these results
                support.append(support_i)
                density.append(density_i)
                counts[i] = kde_data.size
                max_density[i] = density_i.max()

            # Option 2: we have nested grouping by a hue variable
            # ---------------------------------------------------
            else:
                for j, hue_level in enumerate(self.hue_names):

                    # Handle special case of no data at this category level
                    if not group_data.size:
                        support[i].append(np.array([]))
                        density[i].append(np.array([1.]))
                        counts[i, j] = 0
                        max_density[i, j] = 0
                        continue

                    # Select out the observations for this hue level
                    hue_mask = self.plot_hues[i] == hue_level

                    # Strip missing datapoints
                    kde_data = remove_na(group_data[hue_mask])

                    # Handle special case of no data at this level
                    if kde_data.size == 0:
                        support[i].append(np.array([]))
                        density[i].append(np.array([1.]))
                        counts[i, j] = 0
                        max_density[i, j] = 0
                        continue

                    # Handle special case of a single unique datapoint
                    elif np.unique(kde_data).size == 1:
                        support[i].append(np.unique(kde_data))
                        density[i].append(np.array([1.]))
                        counts[i, j] = 1
                        max_density[i, j] = 0
                        continue

                    # Fit the KDE and get the used bandwidth size
                    kde, bw_used = self.fit_kde(kde_data, bw)

                    # Determine the support grid and get the density over it
                    support_ij = self.kde_support(kde_data, bw_used,
                                                  cut, gridsize)
                    density_ij = kde.evaluate(support_ij)

                    # Update the data structures with these results
                    support[i].append(support_ij)
                    density[i].append(density_ij)
                    counts[i, j] = kde_data.size
                    max_density[i, j] = density_ij.max()

        # Scale the height of the density curve.
        # For a violinplot the density is non-quantitative.
        # The objective here is to scale the curves relative to 1 so that
        # they can be multiplied by the width parameter during plotting.
        if scale == "area":
            self.scale_area(density, max_density, scale_hue)

        elif scale == "width":
            self.scale_width(density)

        elif scale == "count":
            self.scale_count(density, counts, scale_hue)

        else:
            raise ValueError(f"scale method '{scale}' not recognized")

        # Set object attributes that will be used while plotting
        self.support = support
        self.density = density
    def fit_kde(self, x, bw):
        """Estimate a KDE for a vector of data with flexible bandwidth.

        Returns the fitted KDE object and the bandwidth in data units.
        """
        kde = gaussian_kde(x, bw)

        # Extract the numeric bandwidth from the KDE object
        bw_used = kde.factor

        # At this point, bw will be a numeric scale factor.
        # To get the actual bandwidth of the kernel, we multiple by the
        # unbiased standard deviation of the data, which we will use
        # elsewhere to compute the range of the support.
        bw_used = bw_used * x.std(ddof=1)

        return kde, bw_used
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
    def scale_area(self, density, max_density, scale_hue):
        """Scale the relative area under the KDE curve.

        This essentially preserves the "standard" KDE scaling, but the
        resulting maximum density will be 1 so that the curve can be
        properly multiplied by the violin width.
        """
        if self.hue_names is None:
            for d in density:
                # Single-observation placeholders (size 1) are left alone
                if d.size > 1:
                    d /= max_density.max()
        else:
            for i, group in enumerate(density):
                for d in group:
                    if scale_hue:
                        # Normalize within this group's hue levels only
                        max = max_density[i].max()  # noqa: shadows builtin
                    else:
                        max = max_density.max()  # noqa: shadows builtin
                    if d.size > 1:
                        d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
    def scale_count(self, density, counts, scale_hue):
        """Scale each density curve by the number of observations."""
        if self.hue_names is None:
            if counts.max() == 0:
                # NOTE(review): this rebinds a local name only and leaves the
                # density arrays unscaled in the all-empty case — confirm
                # this no-op is intended.
                d = 0
            else:
                for count, d in zip(counts, density):
                    d /= d.max()
                    d *= count / counts.max()
        else:
            for i, group in enumerate(density):
                for j, d in enumerate(group):
                    if counts[i].max() == 0:
                        # NOTE(review): local rebinding only; `group[j]` is
                        # left untouched — confirm intended.
                        d = 0
                    else:
                        count = counts[i, j]
                        if scale_hue:
                            # Normalize within this group's hue levels
                            scaler = count / counts[i].max()
                        else:
                            scaler = count / counts.max()
                        d /= d.max()
                        d *= scaler
def dwidth(self):
if self.hue_names is None or not self.dodge:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
    def draw_violins(self, ax):
        """Draw the violins onto `ax`, with any `inner` decorations."""
        # `fill_betweenx` draws the violin sideways relative to the value axis
        fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
        for i, group_data in enumerate(self.plot_data):

            kws = dict(edgecolor=self.gray, linewidth=self.linewidth)

            # Option 1: we have a single level of grouping
            # --------------------------------------------
            if self.plot_hues is None:

                support, density = self.support[i], self.density[i]

                # Handle special case of no observations in this bin
                if support.size == 0:
                    continue

                # Handle special case of a single observation
                elif support.size == 1:
                    val = support.item()
                    d = density.item()
                    self.draw_single_observation(ax, i, val, d)
                    continue

                # Draw the violin for this group
                grid = np.ones(self.gridsize) * i
                fill_func(support,
                          grid - density * self.dwidth,
                          grid + density * self.dwidth,
                          facecolor=self.colors[i],
                          **kws)

                # Draw the interior representation of the data
                if self.inner is None:
                    continue

                # Get a nan-free vector of datapoints
                violin_data = remove_na(group_data)

                # Draw box and whisker information
                if self.inner.startswith("box"):
                    self.draw_box_lines(ax, violin_data, i)

                # Draw quartile lines
                elif self.inner.startswith("quart"):
                    self.draw_quartiles(ax, violin_data, support, density, i)

                # Draw stick observations
                elif self.inner.startswith("stick"):
                    self.draw_stick_lines(ax, violin_data, support, density, i)

                # Draw point observations
                elif self.inner.startswith("point"):
                    self.draw_points(ax, violin_data, i)

            # Option 2: we have nested grouping by a hue variable
            # ---------------------------------------------------
            else:
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):

                    support, density = self.support[i][j], self.density[i][j]
                    kws["facecolor"] = self.colors[j]

                    # Add legend data, but just for one set of violins
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)

                    # Handle the special case where we have no observations
                    if support.size == 0:
                        continue

                    # Handle the special case where we have one observation
                    elif support.size == 1:
                        val = support.item()
                        d = density.item()
                        if self.split:
                            d = d / 2
                        at_group = i + offsets[j]
                        self.draw_single_observation(ax, at_group, val, d)
                        continue

                    # Option 2a: we are drawing a single split violin
                    # -----------------------------------------------
                    if self.split:

                        grid = np.ones(self.gridsize) * i
                        # First hue level fills one side, second the other
                        if j:
                            fill_func(support,
                                      grid,
                                      grid + density * self.dwidth,
                                      **kws)
                        else:
                            fill_func(support,
                                      grid - density * self.dwidth,
                                      grid,
                                      **kws)

                        # Draw the interior representation of the data
                        if self.inner is None:
                            continue

                        # Get a nan-free vector of datapoints
                        hue_mask = self.plot_hues[i] == hue_level
                        violin_data = remove_na(group_data[hue_mask])

                        # Draw quartile lines
                        if self.inner.startswith("quart"):
                            self.draw_quartiles(ax, violin_data,
                                                support, density, i,
                                                ["left", "right"][j])

                        # Draw stick observations
                        elif self.inner.startswith("stick"):
                            self.draw_stick_lines(ax, violin_data,
                                                  support, density, i,
                                                  ["left", "right"][j])

                        # The box and point interior plots are drawn for
                        # all data at the group level, so we just do that once
                        if j and any(self.plot_hues[0] == hue_level):
                            continue

                        # Get the whole vector for this group level
                        violin_data = remove_na(group_data)

                        # Draw box and whisker information
                        if self.inner.startswith("box"):
                            self.draw_box_lines(ax, violin_data, i)

                        # Draw point observations
                        elif self.inner.startswith("point"):
                            self.draw_points(ax, violin_data, i)

                    # Option 2b: we are drawing full nested violins
                    # -----------------------------------------------
                    else:
                        grid = np.ones(self.gridsize) * (i + offsets[j])
                        fill_func(support,
                                  grid - density * self.dwidth,
                                  grid + density * self.dwidth,
                                  **kws)

                        # Draw the interior representation
                        if self.inner is None:
                            continue

                        # Get a nan-free vector of datapoints
                        hue_mask = self.plot_hues[i] == hue_level
                        violin_data = remove_na(group_data[hue_mask])

                        # Draw box and whisker information
                        if self.inner.startswith("box"):
                            self.draw_box_lines(ax, violin_data, i + offsets[j])

                        # Draw quartile lines
                        elif self.inner.startswith("quart"):
                            self.draw_quartiles(ax, violin_data,
                                                support, density,
                                                i + offsets[j])

                        # Draw stick observations
                        elif self.inner.startswith("stick"):
                            self.draw_stick_lines(ax, violin_data,
                                                  support, density,
                                                  i + offsets[j])

                        # Draw point observations
                        elif self.inner.startswith("point"):
                            self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * (q75 - q25)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
color=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
    def plot(self, ax):
        """Make the violin plot."""
        self.draw_violins(ax)
        self.annotate_axes(ax)
        # Horizontal plots put the first category at the top of the axes
        if self.orient == "h":
            ax.invert_yaxis()
class _BarPlotter(_CategoricalStatPlotter):
    """Draw bars of statistical estimates with optional error bars."""

    def __init__(self, x, y, hue, data, order, hue_order,
                 estimator, errorbar, n_boot, units, seed,
                 orient, color, palette, saturation, width,
                 errcolor, errwidth, capsize, dodge):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient,
                                 order, hue_order, units)
        self.establish_colors(color, palette, saturation)
        self.estimate_statistic(estimator, errorbar, n_boot, seed)
        # Geometry and styling options for the bars and error bars
        self.dodge = dodge
        self.width = width
        self.errcolor = errcolor
        self.errwidth = errwidth
        self.capsize = capsize

    def draw_bars(self, ax, kws):
        """Draw the bars onto `ax`."""
        # Vertical plots use bar(); any other orientation uses barh()
        draw = ax.bar if self.orient == "v" else ax.barh
        positions = np.arange(len(self.statistic))
        if self.plot_hues is None:
            # One bar per group, plus its confidence interval
            draw(positions, self.statistic, self.width,
                 color=self.colors, align="center", **kws)
            self.draw_confints(ax,
                               positions,
                               self.confint,
                               [self.errcolor] * len(positions),
                               self.errwidth,
                               self.capsize)
            return
        # Nested bars: one set per hue level, shifted off the group center
        for idx, level in enumerate(self.hue_names):
            shifted = positions + self.hue_offsets[idx]
            draw(shifted, self.statistic[:, idx], self.nested_width,
                 color=self.colors[idx], align="center",
                 label=level, **kws)
            if self.confint.size:
                self.draw_confints(ax,
                                   shifted,
                                   self.confint[:, idx],
                                   [self.errcolor] * len(shifted),
                                   self.errwidth,
                                   self.capsize)

    def plot(self, ax, bar_kws):
        """Make the plot."""
        self.draw_bars(ax, bar_kws)
        self.annotate_axes(ax)
        # Horizontal plots put the first category at the top of the axes
        if self.orient == "h":
            ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
    """Plotter for point plots: estimates drawn as markers, optionally joined."""
    default_palette = "dark"

    def __init__(self, x, y, hue, data, order, hue_order,
                 estimator, errorbar, n_boot, units, seed,
                 markers, linestyles, dodge, join, scale,
                 orient, color, palette, errwidth, capsize, label):
        """Initialize the plotter."""
        self.establish_variables(x, y, hue, data, orient,
                                 order, hue_order, units)
        self.establish_colors(color, palette, 1)
        self.estimate_statistic(estimator, errorbar, n_boot, seed)
        # Override the default palette for single-color plots
        if hue is None and color is None and palette is None:
            self.colors = [color_palette()[0]] * len(self.colors)
        # Don't join single-layer plots with different colors
        if hue is None and palette is not None:
            join = False
        # Use a good default for `dodge=True`
        if dodge is True and self.hue_names is not None:
            dodge = .025 * len(self.hue_names)
        # Make sure we have a marker for each hue level
        if isinstance(markers, str):
            markers = [markers] * len(self.colors)
        self.markers = markers
        # Make sure we have a line style for each hue level
        if isinstance(linestyles, str):
            linestyles = [linestyles] * len(self.colors)
        self.linestyles = linestyles
        # Set the other plot components
        self.dodge = dodge
        self.join = join
        self.scale = scale
        self.errwidth = errwidth
        self.capsize = capsize
        self.label = label

    @property
    def hue_offsets(self):
        """Offsets relative to the center position for each hue level."""
        # Must be a property: draw_points reads it as an attribute
        # (`offsets = self.hue_offsets`) and indexes the result.
        if self.dodge:
            offset = np.linspace(0, self.dodge, len(self.hue_names))
            offset -= offset.mean()
        else:
            offset = np.zeros(len(self.hue_names))
        return offset

    def draw_points(self, ax):
        """Draw the main data components of the plot."""
        # Get the center positions on the categorical axis
        pointpos = np.arange(len(self.statistic))
        # Get the size of the plot elements
        lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
        mew = lw * .75
        markersize = np.pi * np.square(lw) * 2
        if self.plot_hues is None:
            # Draw lines joining each estimate point
            if self.join:
                color = self.colors[0]
                ls = self.linestyles[0]
                if self.orient == "h":
                    ax.plot(self.statistic, pointpos,
                            color=color, ls=ls, lw=lw)
                else:
                    ax.plot(pointpos, self.statistic,
                            color=color, ls=ls, lw=lw)
            # Draw the confidence intervals
            self.draw_confints(ax, pointpos, self.confint, self.colors,
                               self.errwidth, self.capsize)
            # Draw the estimate points
            marker = self.markers[0]
            colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]
            if self.orient == "h":
                x, y = self.statistic, pointpos
            else:
                x, y = pointpos, self.statistic
            ax.scatter(x, y,
                       linewidth=mew, marker=marker, s=markersize,
                       facecolor=colors, edgecolor=colors, label=self.label)
        else:
            offsets = self.hue_offsets
            for j, hue_level in enumerate(self.hue_names):
                # Determine the values to plot for this level
                statistic = self.statistic[:, j]
                # Determine the position on the categorical and z axes
                offpos = pointpos + offsets[j]
                z = j + 1
                # Draw lines joining each estimate point
                if self.join:
                    color = self.colors[j]
                    ls = self.linestyles[j]
                    if self.orient == "h":
                        ax.plot(statistic, offpos, color=color,
                                zorder=z, ls=ls, lw=lw)
                    else:
                        ax.plot(offpos, statistic, color=color,
                                zorder=z, ls=ls, lw=lw)
                # Draw the confidence intervals
                if self.confint.size:
                    confint = self.confint[:, j]
                    errcolors = [self.colors[j]] * len(offpos)
                    self.draw_confints(ax, offpos, confint, errcolors,
                                       self.errwidth, self.capsize,
                                       zorder=z)
                # Draw the estimate points
                n_points = len(remove_na(offpos))
                marker = self.markers[j]
                color = mpl.colors.colorConverter.to_rgb(self.colors[j])
                if self.orient == "h":
                    x, y = statistic, offpos
                else:
                    x, y = offpos, statistic
                # All-NaN levels still get a (blank) scatter so that a
                # legend entry is created for them
                if not len(remove_na(statistic)):
                    x = y = [np.nan] * n_points
                ax.scatter(x, y, label=hue_level,
                           facecolor=color, edgecolor=color,
                           linewidth=mew, marker=marker, s=markersize,
                           zorder=z)

    def plot(self, ax):
        """Make the plot."""
        self.draw_points(ax)
        self.annotate_axes(ax)
        # Horizontal plots put the first category at the top of the axes
        if self.orient == "h":
            ax.invert_yaxis()
class _CountPlotter(_BarPlotter):
    """Variant of _BarPlotter that does not require numeric data."""
    require_numeric = False
class _LVPlotter(_CategoricalPlotter):
    """Plotter for letter-value ("boxen") plots, which extend the boxplot
    with additional quantile boxes in the distribution tails."""
    def __init__(self, x, y, hue, data, order, hue_order,
                 orient, color, palette, saturation,
                 width, dodge, k_depth, linewidth, scale, outlier_prop,
                 trust_alpha, showfliers=True):
        """Initialize the plotter, validating the letter-value parameters.

        Raises
        ------
        ValueError
            If `k_depth`, `scale`, `outlier_prop`, or `trust_alpha` has
            an unaccepted value.
        """
        self.width = width
        self.dodge = dodge
        self.saturation = saturation
        # `k_depth` picks the rule for how many boxes to draw; it may also
        # be given directly as a number of boxes
        k_depth_methods = ['proportion', 'tukey', 'trustworthy', 'full']
        if not (k_depth in k_depth_methods or isinstance(k_depth, Number)):
            msg = (f'k_depth must be one of {k_depth_methods} or a number, '
                   f'but {k_depth} was passed.')
            raise ValueError(msg)
        self.k_depth = k_depth
        if linewidth is None:
            # Fall back on the matplotlib default line width
            linewidth = mpl.rcParams["lines.linewidth"]
        self.linewidth = linewidth
        # `scale` selects the function mapping box depth to box width
        scales = ['linear', 'exponential', 'area']
        if scale not in scales:
            msg = f'scale must be one of {scales}, but {scale} was passed.'
            raise ValueError(msg)
        self.scale = scale
        if ((outlier_prop > 1) or (outlier_prop <= 0)):
            msg = f'outlier_prop {outlier_prop} not in range (0, 1]'
            raise ValueError(msg)
        self.outlier_prop = outlier_prop
        if not 0 < trust_alpha < 1:
            msg = f'trust_alpha {trust_alpha} not in range (0, 1)'
            raise ValueError(msg)
        self.trust_alpha = trust_alpha
        self.showfliers = showfliers
        self.establish_variables(x, y, hue, data, orient, order, hue_order)
        self.establish_colors(color, palette, saturation)
    def _lv_box_ends(self, vals):
        """Get the number of data points and calculate `depth` of
        letter-value plot.

        Returns the percentile (lower, upper) pairs for each box and the
        chosen depth `k`.
        """
        vals = np.asarray(vals)
        # Remove infinite values while handling a 'object' dtype
        # that can come from pd.Float64Dtype() input
        with pd.option_context('mode.use_inf_as_na', True):
            vals = vals[~pd.isnull(vals)]
        n = len(vals)
        p = self.outlier_prop
        # Select the depth, i.e. number of boxes to draw, based on the method
        if self.k_depth == 'full':
            # extend boxes to 100% of the data
            k = int(np.log2(n)) + 1
        elif self.k_depth == 'tukey':
            # This results with 5-8 points in each tail
            k = int(np.log2(n)) - 3
        elif self.k_depth == 'proportion':
            k = int(np.log2(n)) - int(np.log2(n * p)) + 1
        elif self.k_depth == 'trustworthy':
            # Depth at which the box ends remain trustworthy at the
            # requested confidence level
            point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2
            k = int(np.log2(n / point_conf)) + 1
        else:
            k = int(self.k_depth)  # allow having k as input
        # If the number happens to be less than 1, set k to 1
        if k < 1:
            k = 1
        # Calculate the upper end for each of the k boxes
        upper = [100 * (1 - 0.5 ** (i + 1)) for i in range(k, 0, -1)]
        # Calculate the lower end for each of the k boxes
        lower = [100 * (0.5 ** (i + 1)) for i in range(k, 0, -1)]
        # Stitch the box ends together
        percentile_ends = [(i, j) for i, j in zip(lower, upper)]
        box_ends = [np.percentile(vals, q) for q in percentile_ends]
        return box_ends, k
    def _lv_outliers(self, vals, k):
        """Find the outliers based on the letter value depth."""
        # Points beyond the outermost letter-value box count as outliers
        box_edge = 0.5 ** (k + 1)
        perc_ends = (100 * box_edge, 100 * (1 - box_edge))
        edges = np.percentile(vals, perc_ends)
        lower_out = vals[np.where(vals < edges[0])[0]]
        upper_out = vals[np.where(vals > edges[1])[0]]
        return np.concatenate((lower_out, upper_out))
    def _width_functions(self, width_func):
        """Return the function mapping (height, index, depth) to box width."""
        # Dictionary of functions for computing the width of the boxes
        width_functions = {'linear': lambda h, i, k: (i + 1.) / k,
                           'exponential': lambda h, i, k: 2**(-k + i - 1),
                           'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}
        return width_functions[width_func]
    def _lvplot(self, box_data, positions,
                color=[255. / 256., 185. / 256., 0.],
                widths=1, ax=None, box_kws=None,
                flier_kws=None,
                line_kws=None):
        """Draw the letter-value boxes, median line, and outlier points for
        one group at the given position.

        NOTE(review): the mutable default for `color` is only ever read,
        never mutated, so sharing it across calls is safe here.
        """
        # -- Default keyword dicts - based on
        # distributions.plot_univariate_histogram
        box_kws = {} if box_kws is None else box_kws.copy()
        flier_kws = {} if flier_kws is None else flier_kws.copy()
        line_kws = {} if line_kws is None else line_kws.copy()
        # Set the default kwargs for the boxes
        box_default_kws = dict(edgecolor=self.gray,
                               linewidth=self.linewidth)
        for k, v in box_default_kws.items():
            box_kws.setdefault(k, v)
        # Set the default kwargs for the lines denoting medians
        line_default_kws = dict(
            color=".15", alpha=0.45, solid_capstyle="butt", linewidth=self.linewidth
        )
        for k, v in line_default_kws.items():
            line_kws.setdefault(k, v)
        # Set the default kwargs for the outliers scatterplot
        flier_default_kws = dict(marker='d', color=self.gray)
        for k, v in flier_default_kws.items():
            flier_kws.setdefault(k, v)
        vert = self.orient == "v"
        x = positions[0]
        box_data = np.asarray(box_data)
        # If we only have one data point, plot a line
        if len(box_data) == 1:
            line_kws.update({
                'color': box_kws['edgecolor'],
                'linestyle': box_kws.get('linestyle', '-'),
                'linewidth': max(box_kws["linewidth"], line_kws["linewidth"])
            })
            ys = [box_data[0], box_data[0]]
            xs = [x - widths / 2, x + widths / 2]
            if vert:
                xx, yy = xs, ys
            else:
                xx, yy = ys, xs
            ax.plot(xx, yy, **line_kws)
        else:
            # Get the number of data points and calculate "depth" of
            # letter-value plot
            box_ends, k = self._lv_box_ends(box_data)
            # Anonymous functions for calculating the width and height
            # of the letter value boxes
            width = self._width_functions(self.scale)
            # Function to find height of boxes
            def height(b):
                return b[1] - b[0]
            # Functions to construct the letter value boxes
            def vert_perc_box(x, b, i, k, w):
                rect = Patches.Rectangle((x - widths * w / 2, b[0]),
                                         widths * w,
                                         height(b), fill=True)
                return rect
            def horz_perc_box(x, b, i, k, w):
                rect = Patches.Rectangle((b[0], x - widths * w / 2),
                                         height(b), widths * w,
                                         fill=True)
                return rect
            # Scale the width of the boxes so the biggest starts at 1
            w_area = np.array([width(height(b), i, k)
                               for i, b in enumerate(box_ends)])
            w_area = w_area / np.max(w_area)
            # Calculate the medians
            y = np.median(box_data)
            # Calculate the outliers and plot (only if showfliers == True)
            outliers = []
            if self.showfliers:
                outliers = self._lv_outliers(box_data, k)
            hex_color = mpl.colors.rgb2hex(color)
            if vert:
                box_func = vert_perc_box
                xs_median = [x - widths / 2, x + widths / 2]
                ys_median = [y, y]
                xs_outliers = np.full(len(outliers), x)
                ys_outliers = outliers
            else:
                box_func = horz_perc_box
                xs_median = [y, y]
                ys_median = [x - widths / 2, x + widths / 2]
                xs_outliers = outliers
                ys_outliers = np.full(len(outliers), x)
            # Plot the medians
            ax.plot(
                xs_median,
                ys_median,
                **line_kws
            )
            # Plot outliers (if any)
            if len(outliers) > 0:
                ax.scatter(xs_outliers, ys_outliers,
                           **flier_kws
                           )
            # Construct a color map from the input color
            rgb = [hex_color, (1, 1, 1)]
            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)
            # Make sure that the last boxes contain hue and are not pure white
            rgb = [hex_color, cmap(.85)]
            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)
            # Update box_kws with `cmap` if not defined in dict until now
            box_kws.setdefault('cmap', cmap)
            boxes = [box_func(x, b[0], i, k, b[1])
                     for i, b in enumerate(zip(box_ends, w_area))]
            collection = PatchCollection(boxes, **box_kws)
            # Set the color gradation, first box will have color=hex_color
            collection.set_array(np.array(np.linspace(1, 0, len(boxes))))
            # Plot the boxes
            ax.add_collection(collection)
    def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,
                               line_kws=None):
        """Use matplotlib to draw a letter value plot on an Axes."""
        for i, group_data in enumerate(self.plot_data):
            if self.plot_hues is None:
                # Skip this group when it has no data at all
                if group_data.size == 0:
                    continue
                # Draw a single box or a set of boxes
                # with a single level of grouping
                box_data = remove_na(group_data)
                # Handle case where there is no non-null data
                if box_data.size == 0:
                    continue
                color = self.colors[i]
                self._lvplot(box_data,
                             positions=[i],
                             color=color,
                             widths=self.width,
                             ax=ax,
                             box_kws=box_kws,
                             flier_kws=flier_kws,
                             line_kws=line_kws)
            else:
                # Draw nested groups of boxes
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):
                    # Add a legend for this hue level
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)
                    # Skip this group when it has no data at all
                    if group_data.size == 0:
                        continue
                    hue_mask = self.plot_hues[i] == hue_level
                    box_data = remove_na(group_data[hue_mask])
                    # Handle case where there is no non-null data
                    if box_data.size == 0:
                        continue
                    color = self.colors[j]
                    center = i + offsets[j]
                    self._lvplot(box_data,
                                 positions=[center],
                                 color=color,
                                 widths=self.nested_width,
                                 ax=ax,
                                 box_kws=box_kws,
                                 flier_kws=flier_kws,
                                 line_kws=line_kws)
        # Autoscale the values axis to make sure all patches are visible
        ax.autoscale_view(scalex=self.orient == "h", scaley=self.orient == "v")
    def plot(self, ax, box_kws, flier_kws, line_kws):
        """Make the plot."""
        self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)
        self.annotate_axes(ax)
        # Horizontal plots put the first category at the top of the axes
        if self.orient == "h":
            ax.invert_yaxis()
class FacetGrid(Grid):
"""Multi-plot grid for plotting conditional relationships."""
    def __init__(
        self, data, *,
        row=None, col=None, hue=None, col_wrap=None,
        sharex=True, sharey=True, height=3, aspect=1, palette=None,
        row_order=None, col_order=None, hue_order=None, hue_kws=None,
        dropna=False, legend_out=True, despine=True,
        margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
        gridspec_kws=None,
    ):
        """Build the figure and subplot grid for the faceting variables."""
        super().__init__()
        # Determine the hue facet layer information
        hue_var = hue
        if hue is None:
            hue_names = None
        else:
            hue_names = categorical_order(data[hue], hue_order)
        colors = self._get_palette(data, hue, hue_order, palette)
        # Set up the lists of names for the row and column facet variables
        if row is None:
            row_names = []
        else:
            row_names = categorical_order(data[row], row_order)
        if col is None:
            col_names = []
        else:
            col_names = categorical_order(data[col], col_order)
        # Additional dict of kwarg -> list of values for mapping the hue var
        hue_kws = hue_kws if hue_kws is not None else {}
        # Make a boolean mask that is True anywhere there is an NA
        # value in one of the faceting variables, but only if dropna is True
        none_na = np.zeros(len(data), bool)
        if dropna:
            row_na = none_na if row is None else data[row].isnull()
            col_na = none_na if col is None else data[col].isnull()
            hue_na = none_na if hue is None else data[hue].isnull()
            not_na = ~(row_na | col_na | hue_na)
        else:
            not_na = ~none_na
        # Compute the grid shape
        ncol = 1 if col is None else len(col_names)
        nrow = 1 if row is None else len(row_names)
        self._n_facets = ncol * nrow
        self._col_wrap = col_wrap
        if col_wrap is not None:
            if row is not None:
                err = "Cannot use `row` and `col_wrap` together."
                raise ValueError(err)
            # Wrapping folds the single conceptual row of columns into a grid
            ncol = col_wrap
            nrow = int(np.ceil(len(col_names) / col_wrap))
        self._ncol = ncol
        self._nrow = nrow
        # Calculate the base figure size
        # This can get stretched later by a legend
        # TODO this doesn't account for axis labels
        figsize = (ncol * height * aspect, nrow * height)
        # Validate some inputs
        if col_wrap is not None:
            margin_titles = False
        # Build the subplot keyword dictionary
        subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
        gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
        if xlim is not None:
            subplot_kws["xlim"] = xlim
        if ylim is not None:
            subplot_kws["ylim"] = ylim
        # --- Initialize the subplot grid
        with _disable_autolayout():
            fig = plt.figure(figsize=figsize)
            if col_wrap is None:
                kwargs = dict(squeeze=False,
                              sharex=sharex, sharey=sharey,
                              subplot_kw=subplot_kws,
                              gridspec_kw=gridspec_kws)
                axes = fig.subplots(nrow, ncol, **kwargs)
                # Keyed access to the axes: by row name, col name, or both
                if col is None and row is None:
                    axes_dict = {}
                elif col is None:
                    axes_dict = dict(zip(row_names, axes.flat))
                elif row is None:
                    axes_dict = dict(zip(col_names, axes.flat))
                else:
                    facet_product = product(row_names, col_names)
                    axes_dict = dict(zip(facet_product, axes.flat))
            else:
                # If wrapping the col variable we need to make the grid ourselves
                if gridspec_kws:
                    warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
                n_axes = len(col_names)
                axes = np.empty(n_axes, object)
                axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
                # Later axes share with the first one when requested
                if sharex:
                    subplot_kws["sharex"] = axes[0]
                if sharey:
                    subplot_kws["sharey"] = axes[0]
                for i in range(1, n_axes):
                    axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
                axes_dict = dict(zip(col_names, axes))
        # --- Set up the class attributes
        # Attributes that are part of the public API but accessed through
        # a property so that Sphinx adds them to the auto class doc
        self._figure = fig
        self._axes = axes
        self._axes_dict = axes_dict
        self._legend = None
        # Public attributes that aren't explicitly documented
        # (It's not obvious that having them be public was a good idea)
        self.data = data
        self.row_names = row_names
        self.col_names = col_names
        self.hue_names = hue_names
        self.hue_kws = hue_kws
        # Next the private variables
        self._nrow = nrow
        self._row_var = row
        self._ncol = ncol
        self._col_var = col
        self._margin_titles = margin_titles
        self._margin_titles_texts = []
        self._col_wrap = col_wrap
        self._hue_var = hue_var
        self._colors = colors
        self._legend_out = legend_out
        self._legend_data = {}
        self._x_var = None
        self._y_var = None
        self._sharex = sharex
        self._sharey = sharey
        self._dropna = dropna
        self._not_na = not_na
        # --- Make the axes look good
        self.set_titles()
        self.tight_layout()
        if despine:
            self.despine()
        # Hide tick labels and axis labels on interior facets when sharing
        if sharex in [True, 'col']:
            for ax in self._not_bottom_axes:
                for label in ax.get_xticklabels():
                    label.set_visible(False)
                ax.xaxis.offsetText.set_visible(False)
                ax.xaxis.label.set_visible(False)
        if sharey in [True, 'row']:
            for ax in self._not_left_axes:
                for label in ax.get_yticklabels():
                    label.set_visible(False)
                ax.yaxis.offsetText.set_visible(False)
                ax.yaxis.label.set_visible(False)
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
This class maps a dataset onto multiple axes arrayed in a grid of rows
and columns that correspond to *levels* of variables in the dataset.
The plots it produces are often called "lattice", "trellis", or
"small-multiple" graphics.
It can also represent levels of a third variable with the ``hue``
parameter, which plots different subsets of data in different colors.
This uses color to resolve elements on a third dimension, but only
draws subsets on top of each other and will not tailor the ``hue``
parameter for the specific visualization the way that axes-level
functions that accept ``hue`` will.
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
.. warning::
When using seaborn functions that infer semantic mappings from a
dataset, care must be taken to synchronize those mappings across
facets (e.g., by defining the ``hue`` mapping with a palette dict or
setting the data type of the variables to ``category``). In most cases,
it will be better to use a figure-level function (e.g. :func:`relplot`
or :func:`catplot`) than to use :class:`FacetGrid` directly.
See the :ref:`tutorial <grid_tutorial>` for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``{{var}}_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{height}
{aspect}
{palette}
{{row,col,hue}}_order : lists
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples
Limits for each of the axes on each facet (only relevant when
share{{x, y}} is True).
subplot_kws : dict
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict
Dictionary of keyword arguments passed to
:class:`matplotlib.gridspec.GridSpec`
(via :meth:`matplotlib.figure.Figure.subplots`).
Ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships
relplot : Combine a relational plot and a :class:`FacetGrid`
displot : Combine a distribution plot and a :class:`FacetGrid`
catplot : Combine a categorical plot and a :class:`FacetGrid`
lmplot : Combine a regression plot and a :class:`FacetGrid`
Examples
--------
.. note::
These examples use seaborn functions to demonstrate some of the
advanced features of the class, but in most cases you will want
        to use figure-level functions (e.g. :func:`displot`, :func:`relplot`)
to make the plots shown here.
.. include:: ../docstrings/FacetGrid.rst
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
    def map(self, func, *args, **kwargs):
        """Apply a plotting function to each facet's subset of the data.
        Parameters
        ----------
        func : callable
            A plotting function that takes data and keyword arguments. It
            must plot to the currently active matplotlib Axes and take a
            `color` keyword argument. If faceting on the `hue` dimension,
            it must also take a `label` keyword argument.
        args : strings
            Column names in self.data that identify variables with data to
            plot. The data for each variable is passed to `func` in the
            order the variables are specified in the call.
        kwargs : keyword arguments
            All keyword arguments are passed to the plotting function.
        Returns
        -------
        self : object
            Returns self.
        """
        # If color was a keyword argument, grab it here
        kw_color = kwargs.pop("color", None)
        # How we use the function depends on where it comes from
        func_module = str(getattr(func, "__module__", ""))
        # Check for categorical plots without order information
        if func_module == "seaborn.categorical":
            if "order" not in kwargs:
                warning = ("Using the {} function without specifying "
                           "`order` is likely to produce an incorrect "
                           "plot.".format(func.__name__))
                warnings.warn(warning)
            if len(args) == 3 and "hue_order" not in kwargs:
                warning = ("Using the {} function without specifying "
                           "`hue_order` is likely to produce an incorrect "
                           "plot.".format(func.__name__))
                warnings.warn(warning)
        # Iterate over the data subsets
        for (row_i, col_j, hue_k), data_ijk in self.facet_data():
            # If this subset is null, move on
            if not data_ijk.values.size:
                continue
            # Get the current axis; only touch pyplot state when the
            # function is not a seaborn one (seaborn funcs take ax=)
            modify_state = not func_module.startswith("seaborn")
            ax = self.facet_axis(row_i, col_j, modify_state)
            # Decide what color to plot with
            kwargs["color"] = self._facet_color(hue_k, kw_color)
            # Insert the other hue aesthetics if appropriate
            for kw, val_list in self.hue_kws.items():
                kwargs[kw] = val_list[hue_k]
            # Insert a label in the keyword arguments for the legend
            if self._hue_var is not None:
                kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
            # Get the actual data we are going to plot with
            plot_data = data_ijk[list(args)]
            if self._dropna:
                plot_data = plot_data.dropna()
            plot_args = [v for k, v in plot_data.items()]
            # Some matplotlib functions don't handle pandas objects correctly
            if func_module.startswith("matplotlib"):
                plot_args = [v.values for v in plot_args]
            # Draw the plot
            self._facet_plot(func, ax, plot_args, kwargs)
        # Finalize the annotations and layout
        self._finalize_grid(args[:2])
        return self
    def map_dataframe(self, func, *args, **kwargs):
        """Like ``.map`` but passes args as strings and inserts data in kwargs.
        This method is suitable for plotting with functions that accept a
        long-form DataFrame as a `data` keyword argument and access the
        data in that DataFrame using string variable names.
        Parameters
        ----------
        func : callable
            A plotting function that takes data and keyword arguments. Unlike
            the `map` method, a function used here must "understand" Pandas
            objects. It also must plot to the currently active matplotlib Axes
            and take a `color` keyword argument. If faceting on the `hue`
            dimension, it must also take a `label` keyword argument.
        args : strings
            Column names in self.data that identify variables with data to
            plot. The data for each variable is passed to `func` in the
            order the variables are specified in the call.
        kwargs : keyword arguments
            All keyword arguments are passed to the plotting function.
        Returns
        -------
        self : object
            Returns self.
        """
        # If color was a keyword argument, grab it here
        kw_color = kwargs.pop("color", None)
        # Iterate over the data subsets
        for (row_i, col_j, hue_k), data_ijk in self.facet_data():
            # If this subset is null, move on
            if not data_ijk.values.size:
                continue
            # Get the current axis; only touch pyplot state when the
            # function is not a seaborn one (seaborn funcs take ax=)
            modify_state = not str(func.__module__).startswith("seaborn")
            ax = self.facet_axis(row_i, col_j, modify_state)
            # Decide what color to plot with
            kwargs["color"] = self._facet_color(hue_k, kw_color)
            # Insert the other hue aesthetics if appropriate
            for kw, val_list in self.hue_kws.items():
                kwargs[kw] = val_list[hue_k]
            # Insert a label in the keyword arguments for the legend
            if self._hue_var is not None:
                kwargs["label"] = self.hue_names[hue_k]
            # Stick the facet dataframe into the kwargs
            if self._dropna:
                data_ijk = data_ijk.dropna()
            kwargs["data"] = data_ijk
            # Draw the plot
            self._facet_plot(func, ax, args, kwargs)
        # For axis labels, prefer to use positional args for backcompat
        # but also extract the x/y kwargs and use if no corresponding arg
        axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
        for i, val in enumerate(args[:2]):
            axis_labels[i] = val
        self._finalize_grid(axis_labels)
        return self
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
    def _finalize_grid(self, axlabels):
        """Finalize the annotations and layout.

        `axlabels` is the (x, y) pair of labels to apply before
        tightening the figure layout.
        """
        self.set_axis_labels(*axlabels)
        self.tight_layout()
def facet_axis(self, row_i, col_j, modify_state=True):
    """Make the axis identified by these indices active and return it."""
    # With col_wrap the axes are stored flat and indexed by column only
    if self._col_wrap is None:
        target = self.axes[row_i, col_j]
    else:
        target = self.axes.flat[col_j]

    if modify_state:
        # Make it the "current" axes for pyplot-state-based plotting
        plt.sca(target)
    return target
def despine(self, **kwargs):
    """Remove axis spines from the facets and return self for chaining."""
    # Delegate to the shared utility, operating on the whole figure
    utils.despine(self._figure, **kwargs)
    return self
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
    """Set axis labels on the left column and bottom row of the grid."""
    if x_var is not None:
        # Remember the variable name, then push the label to the bottom row
        self._x_var = x_var
        self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
    if y_var is not None:
        # Remember the variable name, then push the label to the left column
        self._y_var = y_var
        self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)

    return self
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
    """Label the x axis on the bottom row of the grid."""
    text = self._x_var if label is None else label
    for ax in self._bottom_axes:
        ax.set_xlabel(text, **kwargs)
    if clear_inner:
        # Blank the x labels on interior facets to reduce clutter
        for ax in self._not_bottom_axes:
            ax.set_xlabel("")
    return self
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
    """Label the y axis on the left column of the grid."""
    text = self._y_var if label is None else label
    for ax in self._left_axes:
        ax.set_ylabel(text, **kwargs)
    if clear_inner:
        # Blank the y labels on interior facets to reduce clutter
        for ax in self._not_left_axes:
            ax.set_ylabel("")
    return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
    """Set x axis tick labels of the grid."""
    for ax in self.axes.flat:
        # Pin the current tick positions so label assignment is stable
        ax.set_xticks(ax.get_xticks())
        if labels is not None:
            ax.set_xticklabels(labels, **kwargs)
            continue
        # Reuse the existing label text, optionally thinning by ``step``
        texts = [t.get_text() for t in ax.get_xticklabels()]
        if step is not None:
            kept_ticks = ax.get_xticks()[::step]
            texts = texts[::step]
            ax.set_xticks(kept_ticks)
        ax.set_xticklabels(texts, **kwargs)
    return self
def set_yticklabels(self, labels=None, **kwargs):
    """Set y axis tick labels on the left column of the grid."""
    for ax in self.axes.flat:
        # Pin the current tick positions so label assignment is stable
        ax.set_yticks(ax.get_yticks())
        if labels is not None:
            ax.set_yticklabels(labels, **kwargs)
        else:
            # Reuse the existing label text on every axes
            texts = [t.get_text() for t in ax.get_yticklabels()]
            ax.set_yticklabels(texts, **kwargs)
    return self
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
row_template = utils.to_utf8(row_template)
col_template = utils.to_utf8(col_template)
template = utils.to_utf8(template)
if self._margin_titles:
# Remove any existing title texts
for text in self._margin_titles_texts:
text.remove()
self._margin_titles_texts = []
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
text = ax.annotate(
title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
**kwargs
)
self._margin_titles_texts.append(text)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
    """Add a reference line(s) to each facet.

    Parameters
    ----------
    x, y : numeric
        Value(s) to draw the line(s) at.
    color : :mod:`matplotlib color <matplotlib.colors>`
        Specifies the color of the reference line(s). Pass ``color=None`` to
        use ``hue`` mapping.
    linestyle : str
        Specifies the style of the reference line(s).
    line_kws : key, value mappings
        Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
        when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
        is not None.

    Returns
    -------
    :class:`FacetGrid` instance
        Returns ``self`` for easy method chaining.

    """
    # Fold the explicit styling args into the kwargs handed to matplotlib
    line_kws['color'] = color
    line_kws['linestyle'] = linestyle

    # Draw a vertical and/or horizontal line across every facet via map
    if x is not None:
        self.map(plt.axvline, x=x, **line_kws)
    if y is not None:
        self.map(plt.axhline, y=y, **line_kws)

    return self
# ------ Properties that are part of the public API and documented by Sphinx
def axes(self):
    """An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
    # Backed by the private attribute populated during grid construction
    return self._axes
def ax(self):
    """The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
    # Only meaningful for a 1x1 grid; otherwise direct callers to `.axes`
    if self.axes.shape != (1, 1):
        raise AttributeError(
            "Use the `.axes` attribute when facet variables are assigned."
        )
    return self.axes[0, 0]
def axes_dict(self):
    """A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.

    If only one of ``row`` or ``col`` is assigned, each key is a string
    representing a level of that variable. If both facet dimensions are
    assigned, each key is a ``({row_level}, {col_level})`` tuple.

    """
    # Backed by the private attribute populated during grid construction
    return self._axes_dict
# ------ Private properties, that require some computation to get
def _inner_axes(self):
    """Return a flat array of the inner axes."""
    if self._col_wrap is None:
        return self.axes[:-1, 1:].flat

    # With col_wrap the axes are a flat sequence; an axis is "inner" when
    # it is not in the first column and sits above the (possibly ragged)
    # bottom row, accounting for trailing empty grid slots.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    inner = [
        ax
        for i, ax in enumerate(self.axes)
        if i % self._ncol
        and i < last_row_start
        and i < last_row_start - n_empty
    ]
    return np.array(inner, object).flat
def _left_axes(self):
    """Return a flat array of the left column of axes."""
    if self._col_wrap is None:
        return self.axes[:, 0].flat

    # Flat (col_wrap) layout: the left column is every ncol-th axes
    first_col = [ax for i, ax in enumerate(self.axes) if i % self._ncol == 0]
    return np.array(first_col, object).flat
def _not_left_axes(self):
    """Return a flat array of axes that aren't on the left column."""
    if self._col_wrap is None:
        return self.axes[:, 1:].flat

    # Flat (col_wrap) layout: everything except each ncol-th axes
    others = [ax for i, ax in enumerate(self.axes) if i % self._ncol != 0]
    return np.array(others, object).flat
def _bottom_axes(self):
    """Return a flat array of the bottom row of axes."""
    if self._col_wrap is None:
        return self.axes[-1, :].flat

    # Flat (col_wrap) layout: an axis is "bottom" when it is in the last
    # row, or exposed because trailing grid slots below it are empty.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    bottom = [
        ax
        for i, ax in enumerate(self.axes)
        if i >= last_row_start or i >= last_row_start - n_empty
    ]
    return np.array(bottom, object).flat
def _not_bottom_axes(self):
    """Return a flat array of axes that aren't on the bottom row."""
    if self._col_wrap is None:
        return self.axes[:-1, :].flat

    # Flat (col_wrap) layout: the complement of _bottom_axes
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    others = [
        ax
        for i, ax in enumerate(self.axes)
        if i < last_row_start and i < last_row_start - n_empty
    ]
    return np.array(others, object).flat
def catplot(
    data=None, *, x=None, y=None, hue=None, row=None, col=None,
    col_wrap=None, estimator="mean", errorbar=("ci", 95), n_boot=1000,
    units=None, seed=None, order=None, hue_order=None, row_order=None,
    col_order=None, height=5, aspect=1, kind="strip", native_scale=False,
    formatter=None, orient=None, color=None, palette=None, hue_norm=None,
    legend="auto", legend_out=True, sharex=True, sharey=True,
    margin_titles=False, facet_kws=None, ci="deprecated",
    **kwargs
):
    """Figure-level interface for drawing categorical plots onto a FacetGrid.

    Dispatches to the axes-level function named ``{kind}plot`` (looked up in
    this module's globals). "strip" and "swarm" use a refactored plotter
    path; all other kinds go through the legacy ``_CategoricalPlotter``
    machinery. Returns the :class:`FacetGrid` that was drawn onto.

    Raises
    ------
    ValueError
        If ``kind`` is unrecognized, if ``native_scale``/``formatter`` are
        used with a legacy kind, or if both ``x`` and ``y`` are given with
        ``kind='count'``.
    """
    # Determine the plotting function
    try:
        plot_func = globals()[kind + "plot"]
    except KeyError:
        err = f"Plot kind '{kind}' is not recognized"
        raise ValueError(err)

    # Check for attempt to plot onto specific axes and warn
    if "ax" in kwargs:
        msg = ("catplot is a figure-level function and does not accept "
               f"target axes. You may wish to try {kind}plot")
        warnings.warn(msg, UserWarning)
        kwargs.pop("ax")

    # Kinds that have been ported to the newer plotter architecture
    refactored_kinds = ["strip", "swarm"]

    if kind in refactored_kinds:

        p = _CategoricalFacetPlotter(
            data=data,
            variables=_CategoricalFacetPlotter.get_semantics(locals()),
            order=order,
            orient=orient,
            require_numeric=False,
            legend=legend,
        )

        # XXX Copying a fair amount from displot, which is not ideal

        for var in ["row", "col"]:
            # Handle faceting variables that lack name information
            if var in p.variables and p.variables[var] is None:
                p.variables[var] = f"_{var}_"

        # Adapt the plot_data dataframe for use with FacetGrid
        data = p.plot_data.rename(columns=p.variables)
        data = data.loc[:, ~data.columns.duplicated()]

        col_name = p.variables.get("col", None)
        row_name = p.variables.get("row", None)

        if facet_kws is None:
            facet_kws = {}

        g = FacetGrid(
            data=data, row=row_name, col=col_name,
            col_wrap=col_wrap, row_order=row_order,
            col_order=col_order, height=height,
            sharex=sharex, sharey=sharey,
            aspect=aspect,
            **facet_kws,
        )

        # Capture this here because scale_categorical is going to insert a (null)
        # x variable even if it is empty. It's not clear whether that needs to
        # happen or if disabling that is the cleaner solution.
        has_xy_data = p.has_xy_data

        if not native_scale or p.var_types[p.cat_axis] == "categorical":
            p.scale_categorical(p.cat_axis, order=order, formatter=formatter)

        p._attach(g)

        # Nothing to draw when no x/y data were provided; return empty grid
        if not has_xy_data:
            return g

        hue_order = p._palette_without_hue_backcompat(palette, hue_order)
        palette, hue_order = p._hue_backcompat(color, palette, hue_order)

        p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

        # Set a default color
        # Otherwise each artist will be plotted separately and trip the color cycle
        if hue is None and color is None:
            color = "C0"

        if kind == "strip":

            # TODO get these defaults programmatically?
            jitter = kwargs.pop("jitter", True)
            dodge = kwargs.pop("dodge", False)
            edgecolor = kwargs.pop("edgecolor", "gray")  # XXX TODO default

            plot_kws = kwargs.copy()

            # XXX Copying possibly bad default decisions from original code for now
            plot_kws.setdefault("zorder", 3)
            plot_kws.setdefault("s", plot_kws.pop("size", 5) ** 2)
            plot_kws.setdefault("linewidth", 0)

            p.plot_strips(
                jitter=jitter,
                dodge=dodge,
                color=color,
                edgecolor=edgecolor,
                plot_kws=plot_kws,
            )

        elif kind == "swarm":

            # TODO get these defaults programmatically?
            dodge = kwargs.pop("dodge", False)
            edgecolor = kwargs.pop("edgecolor", "gray")  # XXX TODO default
            warn_thresh = kwargs.pop("warn_thresh", .05)

            plot_kws = kwargs.copy()

            # XXX Copying possibly bad default decisions from original code for now
            plot_kws.setdefault("zorder", 3)
            plot_kws.setdefault("s", plot_kws.pop("size", 5) ** 2)

            # A None linewidth gets scaled relative to the marker size
            if plot_kws.setdefault("linewidth", 0) is None:
                plot_kws["linewidth"] = np.sqrt(plot_kws["s"]) / 10

            p.plot_swarms(
                dodge=dodge,
                color=color,
                edgecolor=edgecolor,
                warn_thresh=warn_thresh,
                plot_kws=plot_kws,
            )

        # XXX best way to do this housekeeping?
        for ax in g.axes.flat:
            p._adjust_cat_axis(ax, axis=p.cat_axis)

        g.set_axis_labels(
            p.variables.get("x", None),
            p.variables.get("y", None),
        )
        g.set_titles()
        g.tight_layout()

        # XXX Hack to get the legend data in the right place
        for ax in g.axes.flat:
            g._update_legend_data(ax)
            ax.legend_ = None

        if legend and (hue is not None) and (hue not in [x, row, col]):
            g.add_legend(title=hue, label_order=hue_order)

        return g

    # Don't allow usage of forthcoming functionality
    if native_scale is True:
        err = f"native_scale not yet implemented for `kind={kind}`"
        raise ValueError(err)
    if formatter is not None:
        err = f"formatter not yet implemented for `kind={kind}`"
        raise ValueError(err)

    # Alias the input variables to determine categorical order and palette
    # correctly in the case of a count plot
    if kind == "count":
        if x is None and y is not None:
            x_, y_, orient = y, y, "h"
        elif y is None and x is not None:
            x_, y_, orient = x, x, "v"
        else:
            raise ValueError("Either `x` or `y` must be None for kind='count'")
    else:
        x_, y_ = x, y

    # Determine the order for the whole dataset, which will be used in all
    # facets to ensure representation of all data in the final plot
    plotter_class = {
        "box": _BoxPlotter,
        "violin": _ViolinPlotter,
        "boxen": _LVPlotter,
        "bar": _BarPlotter,
        "point": _PointPlotter,
        "count": _CountPlotter,
    }[kind]
    p = _CategoricalPlotter()
    p.require_numeric = plotter_class.require_numeric
    p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
    if (
        order is not None
        or (sharex and p.orient == "v")
        or (sharey and p.orient == "h")
    ):
        # Sync categorical axis between facets to have the same categories
        order = p.group_names
    elif color is None and hue is None:
        msg = (
            "Setting `{}=False` with `color=None` may cause different levels of the "
            "`{}` variable to share colors. This will change in a future version."
        )
        if not sharex and p.orient == "v":
            warnings.warn(msg.format("sharex", "x"), UserWarning)
        if not sharey and p.orient == "h":
            warnings.warn(msg.format("sharey", "y"), UserWarning)

    hue_order = p.hue_names

    # Determine the palette to use
    # (FacetGrid will pass a value for ``color`` to the plotting function
    # so we need to define ``palette`` to get default behavior for the
    # categorical functions
    p.establish_colors(color, palette, 1)
    if kind != "point" or hue is not None:
        palette = p.colors

    # Determine keyword arguments for the facets
    facet_kws = {} if facet_kws is None else facet_kws
    facet_kws.update(
        data=data, row=row, col=col,
        row_order=row_order, col_order=col_order,
        col_wrap=col_wrap, height=height, aspect=aspect,
        sharex=sharex, sharey=sharey,
        legend_out=legend_out, margin_titles=margin_titles,
        dropna=False,
    )

    # Determine keyword arguments for the plotting function
    plot_kws = dict(
        order=order, hue_order=hue_order,
        orient=orient, color=color, palette=palette,
    )
    plot_kws.update(kwargs)

    if kind in ["bar", "point"]:
        # Estimation kinds take the bootstrap/CI configuration
        errorbar = utils._deprecate_ci(errorbar, ci)
        plot_kws.update(
            estimator=estimator, errorbar=errorbar,
            n_boot=n_boot, units=units, seed=seed,
        )

    # Initialize the facets
    g = FacetGrid(**facet_kws)

    # Draw the plot onto the facets
    g.map_dataframe(plot_func, x=x, y=y, hue=hue, **plot_kws)

    if p.orient == "h":
        g.set_axis_labels(p.value_label, p.group_label)
    else:
        g.set_axis_labels(p.group_label, p.value_label)

    # Special case axis labels for a count type plot
    if kind == "count":
        if x is None:
            g.set_axis_labels(x_var="count")
        if y is None:
            g.set_axis_labels(y_var="count")

    if legend and (hue is not None) and (hue not in [x, row, col]):
        hue_order = list(map(utils.to_utf8, hue_order))
        g.add_legend(title=hue, label_order=hue_order)

    return g
171,786 | from __future__ import annotations
import re
from copy import copy
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Tuple, Optional, ClassVar
import numpy as np
import matplotlib as mpl
from matplotlib.ticker import (
Locator,
Formatter,
AutoLocator,
AutoMinorLocator,
FixedLocator,
LinearLocator,
LogLocator,
SymmetricalLogLocator,
MaxNLocator,
MultipleLocator,
EngFormatter,
FuncFormatter,
LogFormatterSciNotation,
ScalarFormatter,
StrMethodFormatter,
)
from matplotlib.dates import (
AutoDateLocator,
AutoDateFormatter,
ConciseDateFormatter,
)
from matplotlib.axis import Axis
from matplotlib.scale import ScaleBase
from pandas import Series
from seaborn._core.rules import categorical_order
from seaborn._core.typing import Default, default
from typing import TYPE_CHECKING
def _make_identity_transforms() -> TransFuncs:
def identity(x):
return x
return identity, identity | null |
171,787 | from __future__ import annotations
import re
from copy import copy
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Tuple, Optional, ClassVar
import numpy as np
import matplotlib as mpl
from matplotlib.ticker import (
Locator,
Formatter,
AutoLocator,
AutoMinorLocator,
FixedLocator,
LinearLocator,
LogLocator,
SymmetricalLogLocator,
MaxNLocator,
MultipleLocator,
EngFormatter,
FuncFormatter,
LogFormatterSciNotation,
ScalarFormatter,
StrMethodFormatter,
)
from matplotlib.dates import (
AutoDateLocator,
AutoDateFormatter,
ConciseDateFormatter,
)
from matplotlib.axis import Axis
from matplotlib.scale import ScaleBase
from pandas import Series
from seaborn._core.rules import categorical_order
from seaborn._core.typing import Default, default
from typing import TYPE_CHECKING
def _make_log_transforms(base: float | None = None) -> TransFuncs:
fs: TransFuncs
if base is None:
fs = np.log, np.exp
elif base == 2:
fs = np.log2, partial(np.power, 2)
elif base == 10:
fs = np.log10, partial(np.power, 10)
else:
def forward(x):
return np.log(x) / np.log(base)
fs = forward, partial(np.power, base)
def log(x: ArrayLike) -> ArrayLike:
with np.errstate(invalid="ignore", divide="ignore"):
return fs[0](x)
def exp(x: ArrayLike) -> ArrayLike:
with np.errstate(invalid="ignore", divide="ignore"):
return fs[1](x)
return log, exp
def _make_logit_transforms(base: float | None = None) -> TransFuncs:
    """Return (logit, expit) transform functions in the given log base.

    ``base=None`` uses the natural logarithm. Inputs outside the open
    interval (0, 1) propagate quietly as nan/inf, matching the underlying
    log transforms.
    """
    log, exp = _make_log_transforms(base)

    def logit(x):
        with np.errstate(invalid="ignore", divide="ignore"):
            return log(x) - log(1 - x)

    def expit(x):
        with np.errstate(invalid="ignore", divide="ignore"):
            # Numerically stable form: 1 / (1 + b**-x). The naive
            # b**x / (1 + b**x) overflows to inf/inf = nan for large x.
            return 1 / (1 + exp(-x))

    return logit, expit
171,788 | from __future__ import annotations
import re
from copy import copy
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Tuple, Optional, ClassVar
import numpy as np
import matplotlib as mpl
from matplotlib.ticker import (
Locator,
Formatter,
AutoLocator,
AutoMinorLocator,
FixedLocator,
LinearLocator,
LogLocator,
SymmetricalLogLocator,
MaxNLocator,
MultipleLocator,
EngFormatter,
FuncFormatter,
LogFormatterSciNotation,
ScalarFormatter,
StrMethodFormatter,
)
from matplotlib.dates import (
AutoDateLocator,
AutoDateFormatter,
ConciseDateFormatter,
)
from matplotlib.axis import Axis
from matplotlib.scale import ScaleBase
from pandas import Series
from seaborn._core.rules import categorical_order
from seaborn._core.typing import Default, default
from typing import TYPE_CHECKING
def _make_log_transforms(base: float | None = None) -> TransFuncs:
fs: TransFuncs
if base is None:
fs = np.log, np.exp
elif base == 2:
fs = np.log2, partial(np.power, 2)
elif base == 10:
fs = np.log10, partial(np.power, 10)
else:
def forward(x):
return np.log(x) / np.log(base)
fs = forward, partial(np.power, base)
def log(x: ArrayLike) -> ArrayLike:
with np.errstate(invalid="ignore", divide="ignore"):
return fs[0](x)
def exp(x: ArrayLike) -> ArrayLike:
with np.errstate(invalid="ignore", divide="ignore"):
return fs[1](x)
return log, exp
def _make_symlog_transforms(c: float = 1, base: float = 10) -> TransFuncs:
    """Build (symlog, symexp) transforms: logarithmic but defined at zero.

    Only ``c`` is configurable from the scale spec; ``base`` stays in the
    signature for compatibility (this matches d3's behavior).
    """
    # From https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001
    log, exp = _make_log_transforms(base)

    def symlog(values):
        with np.errstate(invalid="ignore", divide="ignore"):
            magnitude = log(1 + np.abs(np.divide(values, c)))
            return np.sign(values) * magnitude

    def symexp(values):
        with np.errstate(invalid="ignore", divide="ignore"):
            magnitude = c * (exp(np.abs(values)) - 1)
            return np.sign(values) * magnitude

    return symlog, symexp
171,789 | from __future__ import annotations
import re
from copy import copy
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Tuple, Optional, ClassVar
import numpy as np
import matplotlib as mpl
from matplotlib.ticker import (
Locator,
Formatter,
AutoLocator,
AutoMinorLocator,
FixedLocator,
LinearLocator,
LogLocator,
SymmetricalLogLocator,
MaxNLocator,
MultipleLocator,
EngFormatter,
FuncFormatter,
LogFormatterSciNotation,
ScalarFormatter,
StrMethodFormatter,
)
from matplotlib.dates import (
AutoDateLocator,
AutoDateFormatter,
ConciseDateFormatter,
)
from matplotlib.axis import Axis
from matplotlib.scale import ScaleBase
from pandas import Series
from seaborn._core.rules import categorical_order
from seaborn._core.typing import Default, default
from typing import TYPE_CHECKING
def _make_sqrt_transforms() -> TransFuncs:
def sqrt(x):
return np.sign(x) * np.sqrt(np.abs(x))
def square(x):
return np.sign(x) * np.square(x)
return sqrt, square | null |
171,790 | from __future__ import annotations
import re
from copy import copy
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Tuple, Optional, ClassVar
import numpy as np
import matplotlib as mpl
from matplotlib.ticker import (
Locator,
Formatter,
AutoLocator,
AutoMinorLocator,
FixedLocator,
LinearLocator,
LogLocator,
SymmetricalLogLocator,
MaxNLocator,
MultipleLocator,
EngFormatter,
FuncFormatter,
LogFormatterSciNotation,
ScalarFormatter,
StrMethodFormatter,
)
from matplotlib.dates import (
AutoDateLocator,
AutoDateFormatter,
ConciseDateFormatter,
)
from matplotlib.axis import Axis
from matplotlib.scale import ScaleBase
from pandas import Series
from seaborn._core.rules import categorical_order
from seaborn._core.typing import Default, default
from typing import TYPE_CHECKING
def _make_power_transforms(exp: float) -> TransFuncs:
def forward(x):
return np.sign(x) * np.power(np.abs(x), exp)
def inverse(x):
return np.sign(x) * np.power(np.abs(x), 1 / exp)
return forward, inverse | null |
171,791 | from __future__ import annotations
import re
from copy import copy
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Tuple, Optional, ClassVar
import numpy as np
import matplotlib as mpl
from matplotlib.ticker import (
Locator,
Formatter,
AutoLocator,
AutoMinorLocator,
FixedLocator,
LinearLocator,
LogLocator,
SymmetricalLogLocator,
MaxNLocator,
MultipleLocator,
EngFormatter,
FuncFormatter,
LogFormatterSciNotation,
ScalarFormatter,
StrMethodFormatter,
)
from matplotlib.dates import (
AutoDateLocator,
AutoDateFormatter,
ConciseDateFormatter,
)
from matplotlib.axis import Axis
from matplotlib.scale import ScaleBase
from pandas import Series
from seaborn._core.rules import categorical_order
from seaborn._core.typing import Default, default
from typing import TYPE_CHECKING
def _default_spacer(x: Series) -> float:
return 1 | null |
171,792 | from __future__ import annotations
import warnings
from collections import UserString
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
from seaborn.external.version import Version
from typing import TYPE_CHECKING
def variable_type(
    vector: Series,
    boolean_type: Literal["numeric", "categorical", "boolean"] = "numeric",
    strict_boolean: bool = False,
) -> VarType:
    """
    Determine whether a vector contains numeric, categorical, or datetime data.

    This function differs from the pandas typing API in a few ways:

    - Python sequences or object-typed PyData objects are considered numeric if
      all of their entries are numeric.
    - String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
    - There is some flexibility about how to treat binary / boolean data.

    Parameters
    ----------
    vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
        Input data to test.
    boolean_type : 'numeric', 'categorical', or 'boolean'
        Type to use for vectors containing only 0s and 1s (and NAs).
    strict_boolean : bool
        If True, only consider data to be boolean when the dtype is bool or Boolean.

    Returns
    -------
    var_type : 'numeric', 'categorical', or 'datetime'
        Name identifying the type of data in the vector.
    """
    # If a categorical dtype is set, infer categorical
    # NOTE(review): ``is_categorical_dtype`` is deprecated in recent pandas —
    # TODO confirm the minimum supported pandas version before replacing it.
    if pd.api.types.is_categorical_dtype(vector):
        return VarType("categorical")

    # Special-case all-na data, which is always "numeric"
    if pd.isna(vector).all():
        return VarType("numeric")

    # Special-case binary/boolean data, allow caller to determine
    # This triggers a numpy warning when vector has strings/objects
    # https://github.com/numpy/numpy/issues/6784
    # Because we reduce with .all(), we are agnostic about whether the
    # comparison returns a scalar or vector, so we will ignore the warning.
    # It triggers a separate DeprecationWarning when the vector has datetimes:
    # https://github.com/numpy/numpy/issues/13548
    # This is considered a bug by numpy and will likely go away.
    with warnings.catch_warnings():
        warnings.simplefilter(
            action='ignore',
            category=(FutureWarning, DeprecationWarning)  # type: ignore  # mypy bug?
        )
        if strict_boolean:
            # Only trust the dtype; the "boolean" extension dtype needs pandas 1.0+
            if Version(pd.__version__) < Version("1.0.0"):
                boolean_dtypes = ["bool"]
            else:
                boolean_dtypes = ["bool", "boolean"]
            boolean_vector = vector.dtype in boolean_dtypes
        else:
            # Value-based check: everything is 0, 1, or missing
            boolean_vector = bool(np.isin(vector, [0, 1, np.nan]).all())
        if boolean_vector:
            return VarType(boolean_type)

    # Defer to positive pandas tests
    if pd.api.types.is_numeric_dtype(vector):
        return VarType("numeric")

    if pd.api.types.is_datetime64_dtype(vector):
        return VarType("datetime")

    # --- If we get to here, we need to check the entries

    # Check for a collection where everything is a number

    def all_numeric(x):
        for x_i in x:
            if not isinstance(x_i, Number):
                return False
        return True

    if all_numeric(vector):
        return VarType("numeric")

    # Check for a collection where everything is a datetime

    def all_datetime(x):
        for x_i in x:
            if not isinstance(x_i, (datetime, np.datetime64)):
                return False
        return True

    if all_datetime(vector):
        return VarType("datetime")

    # Otherwise, our final fallback is to consider things categorical
    return VarType("categorical")
The provided code snippet includes necessary dependencies for implementing the `categorical_order` function. Write a Python function `def categorical_order(vector: Series, order: list | None = None) -> list` to solve the following problem:
Return a list of unique data values using seaborn's ordering rules. Parameters ---------- vector : Series Vector of "categorical" values order : list Desired order of category levels to override the order determined from the `data` object. Returns ------- order : list Ordered list of category levels not including null values.
Here is the function:
def categorical_order(vector: Series, order: list | None = None) -> list:
    """
    Return a list of unique data values using seaborn's ordering rules.

    Parameters
    ----------
    vector : Series
        Vector of "categorical" values
    order : list
        Desired order of category levels to override the order determined
        from the `data` object.

    Returns
    -------
    order : list
        Ordered list of category levels not including null values.

    """
    # An explicit order always wins
    if order is not None:
        return order

    if vector.dtype.name == "category":
        # Categorical dtypes carry their own level ordering
        levels = list(vector.cat.categories)
    else:
        # Drop nulls, keep first-appearance order, then sort only numbers
        levels = list(filter(pd.notnull, vector.unique()))
        if variable_type(pd.Series(levels)) == "numeric":
            levels.sort()

    return levels
171,793 | from __future__ import annotations
import io
import os
import re
import sys
import inspect
import itertools
import textwrap
from contextlib import contextmanager
from collections import abc
from collections.abc import Callable, Generator
from typing import Any, List, Optional, cast
from cycler import cycler
import pandas as pd
from pandas import DataFrame, Series, Index
import matplotlib as mpl
from matplotlib.axes import Axes
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from seaborn._marks.base import Mark
from seaborn._stats.base import Stat
from seaborn._core.data import PlotData
from seaborn._core.moves import Move
from seaborn._core.scales import Scale, Nominal
from seaborn._core.subplots import Subplots
from seaborn._core.groupby import GroupBy
from seaborn._core.properties import PROPERTIES, Property
from seaborn._core.typing import (
DataSource,
VariableSpec,
VariableSpecList,
OrderSpec,
Default,
)
from seaborn._core.exceptions import PlotSpecError
from seaborn._core.rules import categorical_order
from seaborn._compat import set_scale_obj, set_layout_engine
from seaborn.rcmod import axes_style, plotting_context
from seaborn.palettes import color_palette
from seaborn.external.version import Version
from typing import TYPE_CHECKING
# NOTE(review): rebinding ``Any`` to a unique sentinel object shadows the
# ``typing.Any`` imported above — presumably intentional as a "no value
# given" marker, but confirm that nothing downstream expects typing.Any.
Any = object()
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
    """Return a list of colors or continuous colormap defining a palette.

    Possible ``palette`` values include:
        - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
        - Name of matplotlib colormap
        - 'husl' or 'hls'
        - 'ch:<cubehelix arguments>'
        - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
        - A sequence of colors in any format matplotlib accepts

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle.

    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.

    See the :ref:`tutorial <palette_tutorial>` for more information.

    Parameters
    ----------
    palette : None, string, or sequence, optional
        Name of palette or None to return current palette. If a sequence, input
        colors are used but possibly cycled and desaturated.
    n_colors : int, optional
        Number of colors in the palette. If ``None``, the default will depend
        on how ``palette`` is specified. Named palettes default to 6 colors,
        but grabbing the current palette or passing in a list of colors will
        not change the number of colors unless this is specified. Asking for
        more colors than exist in the palette will cause it to cycle. Ignored
        when ``as_cmap`` is True.
    desat : float, optional
        Proportion to desaturate each color by.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    set_palette : Set the default color cycle for all plots.
    set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
                      colors from one of the seaborn palettes.

    Examples
    --------

    .. include:: ../docstrings/color_palette.rst

    """
    if palette is None:
        # No spec: return the active matplotlib color cycle
        palette = get_color_cycle()
        if n_colors is None:
            n_colors = len(palette)

    elif not isinstance(palette, str):
        # A sequence of colors was passed through directly
        palette = palette
        if n_colors is None:
            n_colors = len(palette)

    else:
        # A string spec: dispatch on its prefix/name
        if n_colors is None:
            # Use all colors in a qualitative palette or 6 of another kind
            n_colors = QUAL_PALETTE_SIZES.get(palette, 6)

        if palette in SEABORN_PALETTES:
            # Named "seaborn variant" of matplotlib default color cycle
            palette = SEABORN_PALETTES[palette]

        elif palette == "hls":
            # Evenly spaced colors in cylindrical RGB space
            palette = hls_palette(n_colors, as_cmap=as_cmap)

        elif palette == "husl":
            # Evenly spaced colors in cylindrical Lab space
            palette = husl_palette(n_colors, as_cmap=as_cmap)

        elif palette.lower() == "jet":
            # Paternalism
            raise ValueError("No.")

        elif palette.startswith("ch:"):
            # Cubehelix palette with params specified in string
            args, kwargs = _parse_cubehelix_args(palette)
            palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)

        elif palette.startswith("light:"):
            # light palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)

        elif palette.startswith("dark:"):
            # light palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)

        elif palette.startswith("blend:"):
            # blend palette between colors specified in string
            _, colors = palette.split(":")
            colors = colors.split(",")
            palette = blend_palette(colors, n_colors, as_cmap=as_cmap)

        else:
            try:
                # Perhaps a named matplotlib colormap?
                palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
            except (ValueError, KeyError):  # Error class changed in mpl36
                raise ValueError(f"{palette!r} is not a valid palette name")

    if desat is not None:
        palette = [desaturate(c, desat) for c in palette]

    if not as_cmap:

        # Always return as many colors as we asked for
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors)]

        # Always return in r, g, b tuple format
        try:
            palette = map(mpl.colors.colorConverter.to_rgb, palette)
            palette = _ColorPalette(palette)
        except ValueError:
            raise ValueError(f"Could not generate a palette for {palette}")

    return palette
The provided code snippet includes necessary dependencies for implementing the `theme_context` function. Write a Python function `def theme_context(params: dict[str, Any]) -> Generator` to solve the following problem:
Temporarily modify specific matplotlib rcParams.
Here is the function:
def theme_context(params: dict[str, Any]) -> Generator:
    """Temporarily modify specific matplotlib rcParams."""
    # Snapshot the rcParams we are about to override so they can be restored.
    saved_rc = {key: mpl.rcParams[key] for key in params}
    codes = "bgrmyck"
    converter = mpl.colors.colorConverter
    saved_colors = [converter.colors[c] for c in codes]
    themed_colors = [*color_palette("deep6"), (.15, .15, .15)]

    def _assign_colors(colors):
        # Rebind the single-letter shorthand codes, keeping the converter's
        # lookup cache in sync with its color table.
        for code, rgb in zip(codes, colors):
            converter.colors[code] = rgb
            converter.cache[code] = rgb

    # TODO how to allow this to reflect the color cycle when relevant?
    try:
        mpl.rcParams.update(params)
        _assign_colors(themed_colors)
        yield
    finally:
        # Restore both the rcParams and the original shorthand colors,
        # even if the body raised.
        mpl.rcParams.update(saved_rc)
        _assign_colors(saved_colors)
171,794 | from __future__ import annotations
import io
import os
import re
import sys
import inspect
import itertools
import textwrap
from contextlib import contextmanager
from collections import abc
from collections.abc import Callable, Generator
from typing import Any, List, Optional, cast
from cycler import cycler
import pandas as pd
from pandas import DataFrame, Series, Index
import matplotlib as mpl
from matplotlib.axes import Axes
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from seaborn._marks.base import Mark
from seaborn._stats.base import Stat
from seaborn._core.data import PlotData
from seaborn._core.moves import Move
from seaborn._core.scales import Scale, Nominal
from seaborn._core.subplots import Subplots
from seaborn._core.groupby import GroupBy
from seaborn._core.properties import PROPERTIES, Property
from seaborn._core.typing import (
DataSource,
VariableSpec,
VariableSpecList,
OrderSpec,
Default,
)
from seaborn._core.exceptions import PlotSpecError
from seaborn._core.rules import categorical_order
from seaborn._compat import set_scale_obj, set_layout_engine
from seaborn.rcmod import axes_style, plotting_context
from seaborn.palettes import color_palette
from seaborn.external.version import Version
from typing import TYPE_CHECKING
default = Default()
PROPERTIES = {var: cls(var) for var, cls in PROPERTY_CLASSES.items()}
The provided code snippet includes necessary dependencies for implementing the `build_plot_signature` function. Write a Python function `def build_plot_signature(cls)` to solve the following problem:
Decorator function for giving Plot a useful signature. Currently this mostly saves us some duplicated typing, but we would like eventually to have a way of registering new semantic properties, at which point dynamic signature generation would become more important.
Here is the function:
def build_plot_signature(cls):
    """
    Decorator function for giving Plot a useful signature.

    Currently this mostly saves us some duplicated typing, but we would
    like eventually to have a way of registering new semantic properties,
    at which point dynamic signature generation would become more important.
    """
    base_sig = inspect.signature(cls)

    # Positional data args, an explicit `data=` keyword, then one
    # keyword-only parameter per registered semantic property.
    new_params = [
        inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL),
        inspect.Parameter("data", inspect.Parameter.KEYWORD_ONLY, default=None),
    ]
    new_params += [
        inspect.Parameter(prop, inspect.Parameter.KEYWORD_ONLY, default=None)
        for prop in PROPERTIES
    ]
    cls.__signature__ = base_sig.replace(parameters=new_params)

    # Interpolate the property list into the class docstring template.
    prop_listing = ", ".join(f"|{prop}|" for prop in PROPERTIES)
    known_properties = textwrap.fill(
        prop_listing, width=78, subsequent_indent=" " * 8,
    )

    if cls.__doc__ is not None:  # support python -OO mode
        cls.__doc__ = cls.__doc__.format(known_properties=known_properties)

    return cls
171,795 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `ci_to_errsize` function. Write a Python function `def ci_to_errsize(cis, heights)` to solve the following problem:
Convert intervals to error arguments relative to plot heights. Parameters ---------- cis : 2 x n sequence sequence of confidence interval limits heights : n sequence sequence of plot heights Returns ------- errsize : 2 x n array sequence of error size relative to height values in correct format as argument for plt.bar
Here is the function:
def ci_to_errsize(cis, heights):
    """Convert intervals to error arguments relative to plot heights.

    Parameters
    ----------
    cis : 2 x n sequence
        sequence of confidence interval limits
    heights : n sequence
        sequence of plot heights

    Returns
    -------
    errsize : 2 x n array
        sequence of error size relative to height values in correct
        format as argument for plt.bar

    """
    cis = np.atleast_2d(cis).reshape(2, -1)
    heights = np.atleast_1d(heights)
    # Vectorized form of the original per-column loop: the lower error is
    # height - low and the upper error is high - height, stacked as (2, n).
    errsize = np.stack([heights - cis[0], cis[1] - heights])
    return errsize
171,796 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `_normal_quantile_func` function. Write a Python function `def _normal_quantile_func(q)` to solve the following problem:
Compute the quantile function of the standard normal distribution. This wrapper exists because we are dropping scipy as a mandatory dependency but statistics.NormalDist was added to the standard library in 3.8.
Here is the function:
def _normal_quantile_func(q):
"""
Compute the quantile function of the standard normal distribution.
This wrapper exists because we are dropping scipy as a mandatory dependency
but statistics.NormalDist was added to the standard library in 3.8.
"""
try:
from statistics import NormalDist
qf = np.vectorize(NormalDist().inv_cdf)
except ImportError:
try:
from scipy.stats import norm
qf = norm.ppf
except ImportError:
msg = (
"Standard normal quantile functions require either Python>=3.8 or scipy"
)
raise RuntimeError(msg)
return qf(q) | Compute the quantile function of the standard normal distribution. This wrapper exists because we are dropping scipy as a mandatory dependency but statistics.NormalDist was added to the standard library in 3.8. |
171,797 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `_draw_figure` function. Write a Python function `def _draw_figure(fig)` to solve the following problem:
Force draw of a matplotlib figure, accounting for back-compat.
Here is the function:
def _draw_figure(fig):
"""Force draw of a matplotlib figure, accounting for back-compat."""
# See https://github.com/matplotlib/matplotlib/issues/19197 for context
fig.canvas.draw()
if fig.stale:
try:
fig.draw(fig.canvas.get_renderer())
except AttributeError:
pass | Force draw of a matplotlib figure, accounting for back-compat. |
171,798 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
def set_hls_values(color, h=None, l=None, s=None):  # noqa
    """Independently manipulate the h, l, or s channels of a color.

    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name
    h, l, s : floats between 0 and 1, or None
        new values for each channel in hls space

    Returns
    -------
    new_color : rgb tuple
        new color code in RGB tuple representation

    """
    # Convert to HLS, substitute any channel the caller provided, convert back.
    hue, lightness, saturation = colorsys.rgb_to_hls(*to_rgb(color))
    new_hls = (
        hue if h is None else h,
        lightness if l is None else l,
        saturation if s is None else s,
    )
    return colorsys.hls_to_rgb(*new_hls)
The provided code snippet includes necessary dependencies for implementing the `saturate` function. Write a Python function `def saturate(color)` to solve the following problem:
Return a fully saturated color with the same hue. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name Returns ------- new_color : rgb tuple saturated color code in RGB tuple representation
Here is the function:
def saturate(color):
    """Return a fully saturated color with the same hue.

    Parameters
    ----------
    color : matplotlib color
        hex, rgb-tuple, or html color name

    Returns
    -------
    new_color : rgb tuple
        saturated color code in RGB tuple representation

    """
    # Push the saturation channel to its maximum; hue and lightness unchanged.
    fully_saturated = set_hls_values(color, s=1)
    return fully_saturated
171,799 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `axlabel` function. Write a Python function `def axlabel(xlabel, ylabel, **kwargs)` to solve the following problem:
Grab current axis and label it. DEPRECATED: will be removed in a future version.
Here is the function:
def axlabel(xlabel, ylabel, **kwargs):
    """Grab current axis and label it.

    DEPRECATED: will be removed in a future version.

    """
    warnings.warn(
        "This function is deprecated and will be removed in a future version",
        FutureWarning,
    )
    current_ax = plt.gca()
    current_ax.set_xlabel(xlabel, **kwargs)
    current_ax.set_ylabel(ylabel, **kwargs)
171,800 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `despine` function. Write a Python function `def despine(fig=None, ax=None, top=True, right=True, left=False, bottom=False, offset=None, trim=False)` to solve the following problem:
Remove the top and right spines from plot(s). fig : matplotlib figure, optional Figure to despine all axes of, defaults to the current figure. ax : matplotlib axes, optional Specific axes object to despine. Ignored if fig is provided. top, right, left, bottom : boolean, optional If True, remove that spine. offset : int or dict, optional Absolute distance, in points, spines should be moved away from the axes (negative values move spines inward). A single value applies to all spines; a dict can be used to set offset values per side. trim : bool, optional If True, limit spines to the smallest and largest major tick on each non-despined axis. Returns ------- None
Here is the function:
def despine(fig=None, ax=None, top=True, right=True, left=False,
            bottom=False, offset=None, trim=False):
    """Remove the top and right spines from plot(s).

    Parameters
    ----------
    fig : matplotlib figure, optional
        Figure to despine all axes of, defaults to the current figure.
    ax : matplotlib axes, optional
        Specific axes object to despine. Ignored if fig is provided.
    top, right, left, bottom : boolean, optional
        If True, remove that spine.
    offset : int or dict, optional
        Absolute distance, in points, spines should be moved away
        from the axes (negative values move spines inward). A single value
        applies to all spines; a dict can be used to set offset values per
        side.
    trim : bool, optional
        If True, limit spines to the smallest and largest major tick
        on each non-despined axis.

    Returns
    -------
    None

    """
    # Get references to the axes we want
    if fig is None and ax is None:
        axes = plt.gcf().axes
    elif fig is not None:
        axes = fig.axes
    elif ax is not None:
        axes = [ax]

    # Map each side name to its flag explicitly; the previous implementation
    # looked these up via locals()[side], which silently breaks if a
    # parameter is ever renamed.
    remove_spine = {"top": top, "right": right, "left": left, "bottom": bottom}

    for ax_i in axes:
        for side, remove in remove_spine.items():
            # Toggle the spine objects
            is_visible = not remove
            ax_i.spines[side].set_visible(is_visible)
            if offset is not None and is_visible:
                # offset may be a per-side dict or a single scalar
                try:
                    val = offset.get(side, 0)
                except AttributeError:
                    val = offset
                ax_i.spines[side].set_position(('outward', val))

        # Potentially move the ticks: if the left spine is removed but the
        # right one is kept, show y ticks on the right (and likewise for x).
        if left and not right:
            maj_on = any(
                t.tick1line.get_visible()
                for t in ax_i.yaxis.majorTicks
            )
            min_on = any(
                t.tick1line.get_visible()
                for t in ax_i.yaxis.minorTicks
            )
            ax_i.yaxis.set_ticks_position("right")
            for t in ax_i.yaxis.majorTicks:
                t.tick2line.set_visible(maj_on)
            for t in ax_i.yaxis.minorTicks:
                t.tick2line.set_visible(min_on)

        if bottom and not top:
            maj_on = any(
                t.tick1line.get_visible()
                for t in ax_i.xaxis.majorTicks
            )
            min_on = any(
                t.tick1line.get_visible()
                for t in ax_i.xaxis.minorTicks
            )
            ax_i.xaxis.set_ticks_position("top")
            for t in ax_i.xaxis.majorTicks:
                t.tick2line.set_visible(maj_on)
            for t in ax_i.xaxis.minorTicks:
                t.tick2line.set_visible(min_on)

        if trim:
            # clip off the parts of the spines that extend past major ticks
            xticks = np.asarray(ax_i.get_xticks())
            if xticks.size:
                firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
                                        xticks)[0]
                lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
                                       xticks)[-1]
                ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
                ax_i.spines['top'].set_bounds(firsttick, lasttick)
                newticks = xticks.compress(xticks <= lasttick)
                newticks = newticks.compress(newticks >= firsttick)
                ax_i.set_xticks(newticks)

            yticks = np.asarray(ax_i.get_yticks())
            if yticks.size:
                firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
                                        yticks)[0]
                lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
                                       yticks)[-1]
                ax_i.spines['left'].set_bounds(firsttick, lasttick)
                ax_i.spines['right'].set_bounds(firsttick, lasttick)
                newticks = yticks.compress(yticks <= lasttick)
                newticks = newticks.compress(newticks >= firsttick)
                ax_i.set_yticks(newticks)
171,801 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
class Grid(_BaseGrid):
    """A grid that can have multiple subplots and an external legend."""

    # Class-level flags that subclasses override to change layout behavior.
    _margin_titles = False
    _legend_out = True

    # NOTE(review): several attributes read below (self._figure, self._legend_data,
    # self._hue_var, self.hue_names, self.axes) are presumably set by subclasses
    # such as FacetGrid/PairGrid -- confirm against the rest of the module.

    def __init__(self):
        # Figure-coordinate rect handed to tight_layout; add_legend shrinks
        # the right edge so layout leaves room for an external legend.
        self._tight_layout_rect = [0, 0, 1, 1]
        self._tight_layout_pad = None

        # This attribute is set externally and is a hack to handle newer functions that
        # don't add proxy artists onto the Axes. We need an overall cleaner approach.
        self._extract_legend_handles = False

    def tight_layout(self, *args, **kwargs):
        """Call fig.tight_layout within rect that exclude the legend."""
        kwargs = kwargs.copy()
        kwargs.setdefault("rect", self._tight_layout_rect)
        if self._tight_layout_pad is not None:
            kwargs.setdefault("pad", self._tight_layout_pad)
        self._figure.tight_layout(*args, **kwargs)
        return self

    def add_legend(self, legend_data=None, title=None, label_order=None,
                   adjust_subtitles=False, **kwargs):
        """Draw a legend, maybe placing it outside axes and resizing the figure.

        Parameters
        ----------
        legend_data : dict
            Dictionary mapping label names (or two-element tuples where the
            second element is a label name) to matplotlib artist handles. The
            default reads from ``self._legend_data``.
        title : string
            Title for the legend. The default reads from ``self._hue_var``.
        label_order : list of labels
            The order that the legend entries should appear in. The default
            reads from ``self.hue_names``.
        adjust_subtitles : bool
            If True, modify entries with invisible artists to left-align
            the labels and set the font size to that of a title.
        kwargs : key, value pairings
            Other keyword arguments are passed to the underlying legend methods
            on the Figure or Axes object.

        Returns
        -------
        self : Grid instance
            Returns self for easy chaining.

        """
        # Find the data for the legend
        if legend_data is None:
            legend_data = self._legend_data
        if label_order is None:
            if self.hue_names is None:
                label_order = list(legend_data.keys())
            else:
                label_order = list(map(utils.to_utf8, self.hue_names))

        # Labels with no artist get an invisible placeholder patch so the
        # entry still appears in the legend.
        blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
        handles = [legend_data.get(l, blank_handle) for l in label_order]
        title = self._hue_var if title is None else title
        title_size = mpl.rcParams["legend.title_fontsize"]

        # Unpack nested labels from a hierarchical legend
        labels = []
        for entry in label_order:
            if isinstance(entry, tuple):
                _, label = entry
            else:
                label = entry
            labels.append(label)

        # Set default legend kwargs
        kwargs.setdefault("scatterpoints", 1)

        if self._legend_out:

            kwargs.setdefault("frameon", False)
            kwargs.setdefault("loc", "center right")

            # Draw a full-figure legend outside the grid
            figlegend = self._figure.legend(handles, labels, **kwargs)

            self._legend = figlegend
            figlegend.set_title(title, prop={"size": title_size})

            if adjust_subtitles:
                adjust_legend_subtitles(figlegend)

            # Draw the plot to set the bounding boxes correctly
            _draw_figure(self._figure)

            # Calculate and set the new width of the figure so the legend fits
            legend_width = figlegend.get_window_extent().width / self._figure.dpi
            fig_width, fig_height = self._figure.get_size_inches()
            self._figure.set_size_inches(fig_width + legend_width, fig_height)

            # Draw the plot again to get the new transformations
            _draw_figure(self._figure)

            # Now calculate how much space we need on the right side
            legend_width = figlegend.get_window_extent().width / self._figure.dpi
            space_needed = legend_width / (fig_width + legend_width)
            margin = .04 if self._margin_titles else .01
            self._space_needed = margin + space_needed
            right = 1 - self._space_needed

            # Place the subplot axes to give space for the legend
            self._figure.subplots_adjust(right=right)
            self._tight_layout_rect[2] = right

        else:
            # Draw a legend in the first axis
            ax = self.axes.flat[0]
            kwargs.setdefault("loc", "best")

            leg = ax.legend(handles, labels, **kwargs)
            leg.set_title(title, prop={"size": title_size})
            self._legend = leg

            if adjust_subtitles:
                adjust_legend_subtitles(leg)

        return self

    def _update_legend_data(self, ax):
        """Extract the legend data from an axes object and save it."""
        data = {}

        # Get data directly from the legend, which is necessary
        # for newer functions that don't add labeled proxy artists
        if ax.legend_ is not None and self._extract_legend_handles:
            # NOTE(review): Legend.legendHandles was deprecated in matplotlib
            # 3.7 (renamed legend_handles) -- confirm the supported mpl range.
            handles = ax.legend_.legendHandles
            labels = [t.get_text() for t in ax.legend_.texts]
            data.update({l: h for h, l in zip(handles, labels)})

        handles, labels = ax.get_legend_handles_labels()
        data.update({l: h for h, l in zip(handles, labels)})

        self._legend_data.update(data)

        # Now clear the legend
        ax.legend_ = None

    def _get_palette(self, data, hue, hue_order, palette):
        """Get a list of colors for the hue variable."""
        if hue is None:
            palette = color_palette(n_colors=1)

        else:
            hue_names = categorical_order(data[hue], hue_order)
            n_colors = len(hue_names)

            # By default use either the current color palette or HUSL
            if palette is None:
                current_palette = utils.get_color_cycle()
                if n_colors > len(current_palette):
                    colors = color_palette("husl", n_colors)
                else:
                    colors = color_palette(n_colors=n_colors)

            # Allow for palette to map from hue variable names
            elif isinstance(palette, dict):
                color_names = [palette[h] for h in hue_names]
                colors = color_palette(color_names, n_colors)

            # Otherwise act as if we just got a list of colors
            else:
                colors = color_palette(palette, n_colors)

            palette = color_palette(colors, n_colors)

        return palette

    def legend(self):
        """The :class:`matplotlib.legend.Legend` object, if present."""
        # NOTE(review): upstream seaborn defines this as a @property; as
        # written here `grid.legend` evaluates to the bound method rather
        # than the Legend object -- confirm the decorator was not lost.
        try:
            return self._legend
        except AttributeError:
            return None

    def tick_params(self, axis='both', **kwargs):
        """Modify the ticks, tick labels, and gridlines.

        Parameters
        ----------
        axis : {'x', 'y', 'both'}
            The axis on which to apply the formatting.
        kwargs : keyword arguments
            Additional keyword arguments to pass to
            :meth:`matplotlib.axes.Axes.tick_params`.

        Returns
        -------
        self : Grid instance
            Returns self for easy chaining.

        """
        # Apply the same tick formatting to every Axes in the figure.
        for ax in self.figure.axes:
            ax.tick_params(axis=axis, **kwargs)
        return self
The provided code snippet includes necessary dependencies for implementing the `move_legend` function. Write a Python function `def move_legend(obj, loc, **kwargs)` to solve the following problem:
Recreate a plot's legend at a new location. The name is a slight misnomer. Matplotlib legends do not expose public control over their position parameters. So this function creates a new legend, copying over the data from the original object, which is then removed. Parameters ---------- obj : the object with the plot This argument can be either a seaborn or matplotlib object: - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid` - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure` loc : str or int Location argument, as in :meth:`matplotlib.axes.Axes.legend`. kwargs Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`. Examples -------- .. include:: ../docstrings/move_legend.rst
Here is the function:
def move_legend(obj, loc, **kwargs):
    """
    Recreate a plot's legend at a new location.

    The name is a slight misnomer. Matplotlib legends do not expose public
    control over their position parameters. So this function creates a new legend,
    copying over the data from the original object, which is then removed.

    Parameters
    ----------
    obj : the object with the plot
        This argument can be either a seaborn or matplotlib object:

        - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`
        - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`

    loc : str or int
        Location argument, as in :meth:`matplotlib.axes.Axes.legend`.

    kwargs
        Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.

    Examples
    --------

    .. include:: ../docstrings/move_legend.rst

    """
    # This is a somewhat hackish solution that will hopefully be obviated by
    # upstream improvements to matplotlib legends that make them easier to
    # modify after creation.

    from seaborn.axisgrid import Grid  # Avoid circular import

    # Locate the legend object and a method to recreate the legend
    if isinstance(obj, Grid):
        old_legend = obj.legend
        legend_func = obj.figure.legend
    elif isinstance(obj, mpl.axes.Axes):
        old_legend = obj.legend_
        legend_func = obj.legend
    elif isinstance(obj, mpl.figure.Figure):
        if obj.legends:
            # Only the most recently added figure legend is moved.
            old_legend = obj.legends[-1]
        else:
            old_legend = None
        legend_func = obj.legend
    else:
        err = "`obj` must be a seaborn Grid or matplotlib Axes or Figure instance."
        raise TypeError(err)

    if old_legend is None:
        err = f"{obj} has no legend attached."
        raise ValueError(err)

    # Extract the components of the legend we need to reuse
    # NOTE(review): Legend.legendHandles was deprecated in matplotlib 3.7
    # (renamed legend_handles) -- confirm the supported matplotlib range.
    handles = old_legend.legendHandles
    labels = [t.get_text() for t in old_legend.get_texts()]

    # Extract legend properties that can be passed to the recreation method
    # (Vexingly, these don't all round-trip)
    legend_kws = inspect.signature(mpl.legend.Legend).parameters
    props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}

    # Delegate default bbox_to_anchor rules to matplotlib
    props.pop("bbox_to_anchor")

    # Try to propagate the existing title and font properties; respect new ones too
    title = props.pop("title")
    if "title" in kwargs:
        title.set_text(kwargs.pop("title"))
    title_kwargs = {k: v for k, v in kwargs.items() if k.startswith("title_")}
    for key, val in title_kwargs.items():
        # Strip the "title_" prefix and forward the remainder to Text.set
        title.set(**{key[6:]: val})
        kwargs.pop(key)

    # Try to respect the frame visibility
    kwargs.setdefault("frameon", old_legend.legendPatch.get_visible())

    # Remove the old legend and create the new one
    props.update(kwargs)
    old_legend.remove()
    new_legend = legend_func(handles, labels, loc=loc, **props)
    new_legend.set_title(title.get_text(), title.get_fontproperties())

    # Let the Grid object continue to track the correct legend object
    if isinstance(obj, Grid):
        obj._legend = new_legend
171,802 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
def get_dataset_names():
    """Report available example datasets, useful for reporting issues.

    Requires an internet connection.

    """
    repo_url = "https://github.com/mwaskom/seaborn-data"
    with urlopen(repo_url) as resp:
        page = resp.read().decode()
    # Dataset names are the stems of the csv files linked from the repo page.
    return re.findall(r"/mwaskom/seaborn-data/blob/master/(\w*).csv", page)
def get_data_home(data_home=None):
    """Return a path to the cache directory for example datasets.

    This directory is used by :func:`load_dataset`.

    If the ``data_home`` argument is not provided, it will use a directory
    specified by the `SEABORN_DATA` environment variable (if it exists)
    or otherwise default to an OS-appropriate user cache location.

    """
    if data_home is None:
        data_home = os.environ.get("SEABORN_DATA", user_cache_dir("seaborn"))
    data_home = os.path.expanduser(data_home)
    # exist_ok avoids the check-then-create race when two processes
    # initialize the cache directory concurrently.
    os.makedirs(data_home, exist_ok=True)
    return data_home
The provided code snippet includes necessary dependencies for implementing the `load_dataset` function. Write a Python function `def load_dataset(name, cache=True, data_home=None, **kws)` to solve the following problem:
Load an example dataset from the online repository (requires internet). This function provides quick access to a small number of example datasets that are useful for documenting seaborn or generating reproducible examples for bug reports. It is not necessary for normal usage. Note that some of the datasets have a small amount of preprocessing applied to define a proper ordering for categorical variables. Use :func:`get_dataset_names` to see a list of available datasets. Parameters ---------- name : str Name of the dataset (``{name}.csv`` on https://github.com/mwaskom/seaborn-data). cache : boolean, optional If True, try to load from the local cache first, and save to the cache if a download is required. data_home : string, optional The directory in which to cache data; see :func:`get_data_home`. kws : keys and values, optional Additional keyword arguments are passed through to :func:`pandas.read_csv`. Returns ------- df : :class:`pandas.DataFrame` Tabular data, possibly with some preprocessing applied.
Here is the function:
def load_dataset(name, cache=True, data_home=None, **kws):
    """Load an example dataset from the online repository (requires internet).

    This function provides quick access to a small number of example datasets
    that are useful for documenting seaborn or generating reproducible examples
    for bug reports. It is not necessary for normal usage.

    Note that some of the datasets have a small amount of preprocessing applied
    to define a proper ordering for categorical variables.

    Use :func:`get_dataset_names` to see a list of available datasets.

    Parameters
    ----------
    name : str
        Name of the dataset (``{name}.csv`` on
        https://github.com/mwaskom/seaborn-data).
    cache : boolean, optional
        If True, try to load from the local cache first, and save to the cache
        if a download is required.
    data_home : string, optional
        The directory in which to cache data; see :func:`get_data_home`.
    kws : keys and values, optional
        Additional keyword arguments are passed through to
        :func:`pandas.read_csv`.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        Tabular data, possibly with some preprocessing applied.

    """
    # A common beginner mistake is to assume that one's personal data needs
    # to be passed through this function to be usable with seaborn.
    # Let's provide a more helpful error than you would otherwise get.
    if isinstance(name, pd.DataFrame):
        err = (
            "This function accepts only strings (the name of an example dataset). "
            "You passed a pandas DataFrame. If you have your own dataset, "
            "it is not necessary to use this function before plotting."
        )
        raise TypeError(err)

    url = f"https://raw.githubusercontent.com/mwaskom/seaborn-data/master/{name}.csv"

    if cache:
        cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))
        if not os.path.exists(cache_path):
            if name not in get_dataset_names():
                raise ValueError(f"'{name}' is not one of the example datasets.")
            urlretrieve(url, cache_path)
        full_path = cache_path
    else:
        full_path = url

    df = pd.read_csv(full_path, **kws)

    # Some CSVs end with a trailing newline that pandas parses as an all-null
    # row; drop it. Guard with len(df) so an empty file doesn't raise
    # IndexError from df.iloc[-1].
    if len(df) and df.iloc[-1].isnull().all():
        df = df.iloc[:-1]

    # Set some columns as a categorical type with ordered levels
    if name == "tips":
        df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
        df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
        df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
        df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])

    elif name == "flights":
        months = df["month"].str[:3]
        df["month"] = pd.Categorical(months, months.unique())

    elif name == "exercise":
        df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
        df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
        df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])

    elif name == "titanic":
        df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
        df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))

    elif name == "penguins":
        df["sex"] = df["sex"].str.title()

    elif name == "diamonds":
        df["color"] = pd.Categorical(
            df["color"], ["D", "E", "F", "G", "H", "I", "J"],
        )
        df["clarity"] = pd.Categorical(
            df["clarity"], ["IF", "VVS1", "VVS2", "VS1", "VS2", "SI1", "SI2", "I1"],
        )
        df["cut"] = pd.Categorical(
            df["cut"], ["Ideal", "Premium", "Very Good", "Good", "Fair"],
        )

    elif name == "taxis":
        df["pickup"] = pd.to_datetime(df["pickup"])
        df["dropoff"] = pd.to_datetime(df["dropoff"])

    elif name == "seaice":
        df["Date"] = pd.to_datetime(df["Date"])

    elif name == "dowjones":
        df["Date"] = pd.to_datetime(df["Date"])

    return df
171,803 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
def axis_ticklabels_overlap(labels):
    """Check whether any of the ticklabels in a list overlap each other.

    Parameters
    ----------
    labels : list of matplotlib ticklabels

    Returns
    -------
    overlap : boolean
        True if any of the labels overlap.

    """
    if not labels:
        return False
    try:
        extents = [label.get_window_extent() for label in labels]
        # Each bbox counts itself as an overlap, so a count of 1 means
        # the label touches no neighbors
        return any(bbox.count_overlaps(extents) > 1 for bbox in extents)
    except RuntimeError:
        # The macos backend can raise here; report no overlap in that case
        return False
The provided code snippet includes necessary dependencies for implementing the `axes_ticklabels_overlap` function. Write a Python function `def axes_ticklabels_overlap(ax)` to solve the following problem:
Return booleans for whether the x and y ticklabels on an Axes overlap. Parameters ---------- ax : matplotlib Axes Returns ------- x_overlap, y_overlap : booleans True when the labels on that axis overlap.
Here is the function:
def axes_ticklabels_overlap(ax):
    """Report the overlap status of the x and y ticklabels on an Axes.

    Parameters
    ----------
    ax : matplotlib Axes

    Returns
    -------
    x_overlap, y_overlap : booleans
        True when the labels on that axis overlap.

    """
    x_overlap = axis_ticklabels_overlap(ax.get_xticklabels())
    y_overlap = axis_ticklabels_overlap(ax.get_yticklabels())
    return x_overlap, y_overlap
171,804 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `locator_to_legend_entries` function. Write a Python function `def locator_to_legend_entries(locator, limits, dtype)` to solve the following problem:
Return levels and formatted levels for brief numeric legends.
Here is the function:
def locator_to_legend_entries(locator, limits, dtype):
    """Return levels and formatted levels for brief numeric legends."""
    vmin, vmax = limits
    # The locator can return ticks outside the limits; clip them here
    levels = [
        level for level in locator.tick_values(vmin, vmax).astype(dtype)
        if vmin <= level <= vmax
    ]

    class dummy_axis:
        # Minimal stand-in so the formatter can query a view interval
        def get_view_interval(self):
            return limits

    if isinstance(locator, mpl.ticker.LogLocator):
        formatter = mpl.ticker.LogFormatter()
    else:
        formatter = mpl.ticker.ScalarFormatter()
        # An offset or scientific notation cannot currently be
        # represented in the legend, so turn both off
        formatter.set_useOffset(False)
        formatter.set_scientific(False)
    formatter.axis = dummy_axis()

    # TODO: The following two lines should be replaced
    # once pinned matplotlib>=3.1.0 with:
    # formatted_levels = formatter.format_ticks(levels)
    formatter.set_locs(levels)
    formatted = [formatter(level) for level in levels]

    return levels, formatted
171,805 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `relative_luminance` function. Write a Python function `def relative_luminance(color)` to solve the following problem:
Calculate the relative luminance of a color according to W3C standards Parameters ---------- color : matplotlib color or sequence of matplotlib colors Hex code, rgb-tuple, or html color name. Returns ------- luminance : float(s) between 0 and 1
Here is the function:
def relative_luminance(color):
    """Calculate the relative luminance of a color according to W3C standards

    Parameters
    ----------
    color : matplotlib color or sequence of matplotlib colors
        Hex code, rgb-tuple, or html color name.

    Returns
    -------
    luminance : float(s) between 0 and 1

    """
    rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]
    # Linearize the sRGB channels per the WCAG definition
    linear = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)
    lum = linear.dot([.2126, .7152, .0722])
    try:
        # A single input color reduces to a scalar
        return lum.item()
    except ValueError:
        return lum
171,806 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `adjust_legend_subtitles` function. Write a Python function `def adjust_legend_subtitles(legend)` to solve the following problem:
Make invisible-handle "subtitles" entries look more like titles. Note: This function is not part of the public API and may be changed or removed.
Here is the function:
def adjust_legend_subtitles(legend):
    """
    Make invisible-handle "subtitles" entries look more like titles.

    Note: This function is not part of the public API and may be changed or removed.

    Parameters
    ----------
    legend : matplotlib legend
        Legend whose entries should be inspected and restyled in place.
    """
    # Legend title not in rcParams until 3.0
    font_size = plt.rcParams.get("legend.title_fontsize", None)
    # Each HPacker child of the legend's VPacker is one entry:
    # a handle (draw) area packed next to a text area
    hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()
    for hpack in hpackers:
        draw_area, text_area = hpack.get_children()
        handles = draw_area.get_children()
        # Entries whose handles are not all visible are treated as
        # "subtitles": collapse the handle area and resize the text
        if not all(artist.get_visible() for artist in handles):
            draw_area.set_width(0)
            for text in text_area.get_children():
                if font_size is not None:
                    text.set_size(font_size)
171,807 | import os
import re
import inspect
import warnings
import colorsys
from contextlib import contextmanager
from urllib.request import urlopen, urlretrieve
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.colors import to_rgb
import matplotlib.pyplot as plt
from matplotlib.cbook import normalize_kwargs
from .external.version import Version
from .external.appdirs import user_cache_dir
The provided code snippet includes necessary dependencies for implementing the `_disable_autolayout` function. Write a Python function `def _disable_autolayout()` to solve the following problem:
Context manager for preventing rc-controlled auto-layout behavior.
Here is the function:
@contextmanager
def _disable_autolayout():
    """Context manager for preventing rc-controlled auto-layout behavior.

    Temporarily forces ``figure.autolayout`` off and restores the previous
    value on exit, even if the body raises. The missing ``@contextmanager``
    decorator is added here: the function yields and is used elsewhere as
    ``with _disable_autolayout():``, which fails for a bare generator.
    """
    # This is a workaround for an issue in matplotlib, for details see
    # https://github.com/mwaskom/seaborn/issues/2914
    # The only effect of this rcParam is to set the default value for
    # layout= in plt.figure, so we could just do that instead.
    # But then we would need to own the complexity of the transition
    # from tight_layout=True -> layout="tight". This seems easier,
    # but can be removed when (if) that is simpler on the matplotlib side,
    # or if the layout algorithms are improved to handle figure legends.
    orig_val = mpl.rcParams["figure.autolayout"]
    try:
        mpl.rcParams["figure.autolayout"] = False
        yield
    finally:
        mpl.rcParams["figure.autolayout"] = orig_val
171,808 | import copy
from textwrap import dedent
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from . import utils
from . import algorithms as algo
from .axisgrid import FacetGrid, _facet_docs
def regplot(
    data=None, *, x=None, y=None,
    x_estimator=None, x_bins=None, x_ci="ci",
    scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
    seed=None, order=1, logistic=False, lowess=False, robust=False,
    logx=False, x_partial=None, y_partial=None,
    truncate=True, dropna=True, x_jitter=None, y_jitter=None,
    label=None, color=None, marker="o",
    scatter_kws=None, line_kws=None, ax=None
):
    # Build the plotter first so invalid inputs fail before any figure
    # state is touched
    model = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
                               scatter, fit_reg, ci, n_boot, units, seed,
                               order, logistic, lowess, robust, logx,
                               x_partial, y_partial, truncate, dropna,
                               x_jitter, y_jitter, color, label)

    if ax is None:
        ax = plt.gca()

    # Copy the kwarg dicts so the caller's inputs are never mutated
    scatter_kws = copy.copy(scatter_kws) if scatter_kws is not None else {}
    scatter_kws["marker"] = marker
    line_kws = copy.copy(line_kws) if line_kws is not None else {}

    model.plot(ax, scatter_kws, line_kws)
    return ax
regplot.__doc__ = dedent("""\
Plot data and a linear regression model fit.
{model_api}
Parameters
----------
x, y: string, series, or vector array
Input variables. If strings, these should correspond with column names
in ``data``. When pandas objects are used, axes will be labeled with
the series name.
{data}
{x_estimator}
{x_bins}
{x_ci}
{scatter}
{fit_reg}
{ci}
{n_boot}
{units}
{seed}
{order}
{logistic}
{lowess}
{robust}
{logx}
{xy_partial}
{truncate}
{xy_jitter}
label : string
Label to apply to either the scatterplot or regression line (if
``scatter`` is ``False``) for use in a legend.
color : matplotlib color
Color to apply to all plot elements; will be superseded by colors
passed in ``scatter_kws`` or ``line_kws``.
marker : matplotlib marker code
Marker to use for the scatterplot glyphs.
{scatter_line_kws}
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
Returns
-------
ax : matplotlib Axes
The Axes object containing the plot.
See Also
--------
lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
linear relationships in a dataset.
jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
``kind="reg"``).
pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
``kind="reg"``).
residplot : Plot the residuals of a linear regression model.
Notes
-----
{regplot_vs_lmplot}
It's also easy to combine :func:`regplot` and :class:`JointGrid` or
:class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
functions, although these do not directly accept all of :func:`regplot`'s
parameters.
Examples
--------
.. include: ../docstrings/regplot.rst
""").format(**_regression_docs)
class FacetGrid(Grid):
"""Multi-plot grid for plotting conditional relationships."""
    def __init__(
        self, data, *,
        row=None, col=None, hue=None, col_wrap=None,
        sharex=True, sharey=True, height=3, aspect=1, palette=None,
        row_order=None, col_order=None, hue_order=None, hue_kws=None,
        dropna=False, legend_out=True, despine=True,
        margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
        gridspec_kws=None,
    ):

        super().__init__()

        # Determine the hue facet layer information
        hue_var = hue
        if hue is None:
            hue_names = None
        else:
            hue_names = categorical_order(data[hue], hue_order)

        colors = self._get_palette(data, hue, hue_order, palette)

        # Set up the lists of names for the row and column facet variables
        if row is None:
            row_names = []
        else:
            row_names = categorical_order(data[row], row_order)

        if col is None:
            col_names = []
        else:
            col_names = categorical_order(data[col], col_order)

        # Additional dict of kwarg -> list of values for mapping the hue var
        hue_kws = hue_kws if hue_kws is not None else {}

        # Make a boolean mask that is True anywhere there is an NA
        # value in one of the faceting variables, but only if dropna is True
        none_na = np.zeros(len(data), bool)
        if dropna:
            row_na = none_na if row is None else data[row].isnull()
            col_na = none_na if col is None else data[col].isnull()
            hue_na = none_na if hue is None else data[hue].isnull()
            not_na = ~(row_na | col_na | hue_na)
        else:
            not_na = ~none_na

        # Compute the grid shape
        # Note: _n_facets is the count of real facets, recorded before
        # col_wrap possibly pads the grid with empty slots
        ncol = 1 if col is None else len(col_names)
        nrow = 1 if row is None else len(row_names)
        self._n_facets = ncol * nrow

        self._col_wrap = col_wrap
        if col_wrap is not None:
            if row is not None:
                err = "Cannot use `row` and `col_wrap` together."
                raise ValueError(err)
            ncol = col_wrap
            nrow = int(np.ceil(len(col_names) / col_wrap))
        self._ncol = ncol
        self._nrow = nrow

        # Calculate the base figure size
        # This can get stretched later by a legend
        # TODO this doesn't account for axis labels
        figsize = (ncol * height * aspect, nrow * height)

        # Validate some inputs
        if col_wrap is not None:
            margin_titles = False

        # Build the subplot keyword dictionary
        subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
        gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
        if xlim is not None:
            subplot_kws["xlim"] = xlim
        if ylim is not None:
            subplot_kws["ylim"] = ylim

        # --- Initialize the subplot grid

        with _disable_autolayout():
            fig = plt.figure(figsize=figsize)

            if col_wrap is None:

                kwargs = dict(squeeze=False,
                              sharex=sharex, sharey=sharey,
                              subplot_kw=subplot_kws,
                              gridspec_kw=gridspec_kws)

                axes = fig.subplots(nrow, ncol, **kwargs)

                # axes_dict maps facet level(s) -> Axes; keys are tuples
                # when both row and col are faceted
                if col is None and row is None:
                    axes_dict = {}
                elif col is None:
                    axes_dict = dict(zip(row_names, axes.flat))
                elif row is None:
                    axes_dict = dict(zip(col_names, axes.flat))
                else:
                    facet_product = product(row_names, col_names)
                    axes_dict = dict(zip(facet_product, axes.flat))

            else:

                # If wrapping the col variable we need to make the grid ourselves
                if gridspec_kws:
                    warnings.warn("`gridspec_kws` ignored when using `col_wrap`")

                n_axes = len(col_names)
                axes = np.empty(n_axes, object)
                axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
                # Share subsequent axes against the first one
                if sharex:
                    subplot_kws["sharex"] = axes[0]
                if sharey:
                    subplot_kws["sharey"] = axes[0]
                for i in range(1, n_axes):
                    axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)

                axes_dict = dict(zip(col_names, axes))

        # --- Set up the class attributes

        # Attributes that are part of the public API but accessed through
        # a property so that Sphinx adds them to the auto class doc
        self._figure = fig
        self._axes = axes
        self._axes_dict = axes_dict
        self._legend = None

        # Public attributes that aren't explicitly documented
        # (It's not obvious that having them be public was a good idea)
        self.data = data
        self.row_names = row_names
        self.col_names = col_names
        self.hue_names = hue_names
        self.hue_kws = hue_kws

        # Next the private variables
        self._nrow = nrow
        self._row_var = row
        self._ncol = ncol
        self._col_var = col

        self._margin_titles = margin_titles
        self._margin_titles_texts = []
        self._col_wrap = col_wrap
        self._hue_var = hue_var
        self._colors = colors
        self._legend_out = legend_out
        self._legend_data = {}
        self._x_var = None
        self._y_var = None
        self._sharex = sharex
        self._sharey = sharey
        self._dropna = dropna
        self._not_na = not_na

        # --- Make the axes look good

        self.set_titles()
        self.tight_layout()

        if despine:
            self.despine()

        # When axes are shared, hide the tick labels, offset text, and
        # axis labels on the interior facets
        if sharex in [True, 'col']:
            for ax in self._not_bottom_axes:
                for label in ax.get_xticklabels():
                    label.set_visible(False)
                ax.xaxis.offsetText.set_visible(False)
                ax.xaxis.label.set_visible(False)

        if sharey in [True, 'row']:
            for ax in self._not_left_axes:
                for label in ax.get_yticklabels():
                    label.set_visible(False)
                ax.yaxis.offsetText.set_visible(False)
                ax.yaxis.label.set_visible(False)
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
This class maps a dataset onto multiple axes arrayed in a grid of rows
and columns that correspond to *levels* of variables in the dataset.
The plots it produces are often called "lattice", "trellis", or
"small-multiple" graphics.
It can also represent levels of a third variable with the ``hue``
parameter, which plots different subsets of data in different colors.
This uses color to resolve elements on a third dimension, but only
draws subsets on top of each other and will not tailor the ``hue``
parameter for the specific visualization the way that axes-level
functions that accept ``hue`` will.
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
.. warning::
When using seaborn functions that infer semantic mappings from a
dataset, care must be taken to synchronize those mappings across
facets (e.g., by defining the ``hue`` mapping with a palette dict or
setting the data type of the variables to ``category``). In most cases,
it will be better to use a figure-level function (e.g. :func:`relplot`
or :func:`catplot`) than to use :class:`FacetGrid` directly.
See the :ref:`tutorial <grid_tutorial>` for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``{{var}}_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{height}
{aspect}
{palette}
{{row,col,hue}}_order : lists
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples
Limits for each of the axes on each facet (only relevant when
share{{x, y}} is True).
subplot_kws : dict
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict
Dictionary of keyword arguments passed to
:class:`matplotlib.gridspec.GridSpec`
(via :meth:`matplotlib.figure.Figure.subplots`).
Ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships
relplot : Combine a relational plot and a :class:`FacetGrid`
displot : Combine a distribution plot and a :class:`FacetGrid`
catplot : Combine a categorical plot and a :class:`FacetGrid`
lmplot : Combine a regression plot and a :class:`FacetGrid`
Examples
--------
.. note::
These examples use seaborn functions to demonstrate some of the
advanced features of the class, but in most cases you will want
to use figure-level functions (e.g. :func:`displot`, :func:`relplot`)
to make the plots shown here.
.. include:: ../docstrings/FacetGrid.rst
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
    def map(self, func, *args, **kwargs):
        """Apply a plotting function to each facet's subset of the data.

        Parameters
        ----------
        func : callable
            A plotting function that takes data and keyword arguments. It
            must plot to the currently active matplotlib Axes and take a
            `color` keyword argument. If faceting on the `hue` dimension,
            it must also take a `label` keyword argument.
        args : strings
            Column names in self.data that identify variables with data to
            plot. The data for each variable is passed to `func` in the
            order the variables are specified in the call.
        kwargs : keyword arguments
            All keyword arguments are passed to the plotting function.

        Returns
        -------
        self : object
            Returns self.

        """
        # If color was a keyword argument, grab it here
        kw_color = kwargs.pop("color", None)

        # How we use the function depends on where it comes from
        func_module = str(getattr(func, "__module__", ""))

        # Check for categorical plots without order information
        if func_module == "seaborn.categorical":
            if "order" not in kwargs:
                warning = ("Using the {} function without specifying "
                           "`order` is likely to produce an incorrect "
                           "plot.".format(func.__name__))
                warnings.warn(warning)
            if len(args) == 3 and "hue_order" not in kwargs:
                warning = ("Using the {} function without specifying "
                           "`hue_order` is likely to produce an incorrect "
                           "plot.".format(func.__name__))
                warnings.warn(warning)

        # Iterate over the data subsets
        for (row_i, col_j, hue_k), data_ijk in self.facet_data():

            # If this subset is null, move on
            if not data_ijk.values.size:
                continue

            # Get the current axis
            # seaborn functions accept an explicit Axes, so only modify
            # pyplot state for functions from other libraries
            modify_state = not func_module.startswith("seaborn")
            ax = self.facet_axis(row_i, col_j, modify_state)

            # Decide what color to plot with
            kwargs["color"] = self._facet_color(hue_k, kw_color)

            # Insert the other hue aesthetics if appropriate
            for kw, val_list in self.hue_kws.items():
                kwargs[kw] = val_list[hue_k]

            # Insert a label in the keyword arguments for the legend
            if self._hue_var is not None:
                kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])

            # Get the actual data we are going to plot with
            plot_data = data_ijk[list(args)]
            if self._dropna:
                plot_data = plot_data.dropna()
            plot_args = [v for k, v in plot_data.items()]

            # Some matplotlib functions don't handle pandas objects correctly
            if func_module.startswith("matplotlib"):
                plot_args = [v.values for v in plot_args]

            # Draw the plot
            self._facet_plot(func, ax, plot_args, kwargs)

        # Finalize the annotations and layout
        self._finalize_grid(args[:2])

        return self
    def map_dataframe(self, func, *args, **kwargs):
        """Like ``.map`` but passes args as strings and inserts data in kwargs.

        This method is suitable for plotting with functions that accept a
        long-form DataFrame as a `data` keyword argument and access the
        data in that DataFrame using string variable names.

        Parameters
        ----------
        func : callable
            A plotting function that takes data and keyword arguments. Unlike
            the `map` method, a function used here must "understand" Pandas
            objects. It also must plot to the currently active matplotlib Axes
            and take a `color` keyword argument. If faceting on the `hue`
            dimension, it must also take a `label` keyword argument.
        args : strings
            Column names in self.data that identify variables with data to
            plot. The data for each variable is passed to `func` in the
            order the variables are specified in the call.
        kwargs : keyword arguments
            All keyword arguments are passed to the plotting function.

        Returns
        -------
        self : object
            Returns self.

        """

        # If color was a keyword argument, grab it here
        kw_color = kwargs.pop("color", None)

        # Iterate over the data subsets
        for (row_i, col_j, hue_k), data_ijk in self.facet_data():

            # If this subset is null, move on
            if not data_ijk.values.size:
                continue

            # Get the current axis
            # Only touch pyplot state for non-seaborn plotting functions
            modify_state = not str(func.__module__).startswith("seaborn")
            ax = self.facet_axis(row_i, col_j, modify_state)

            # Decide what color to plot with
            kwargs["color"] = self._facet_color(hue_k, kw_color)

            # Insert the other hue aesthetics if appropriate
            for kw, val_list in self.hue_kws.items():
                kwargs[kw] = val_list[hue_k]

            # Insert a label in the keyword arguments for the legend
            if self._hue_var is not None:
                kwargs["label"] = self.hue_names[hue_k]

            # Stick the facet dataframe into the kwargs
            if self._dropna:
                data_ijk = data_ijk.dropna()
            kwargs["data"] = data_ijk

            # Draw the plot
            self._facet_plot(func, ax, args, kwargs)

        # For axis labels, prefer to use positional args for backcompat
        # but also extract the x/y kwargs and use if no corresponding arg
        axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
        for i, val in enumerate(args[:2]):
            axis_labels[i] = val
        self._finalize_grid(axis_labels)

        return self
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
    def _finalize_grid(self, axlabels):
        """Finalize the annotations and layout.

        Parameters
        ----------
        axlabels : sequence
            (x label, y label) pair forwarded to :meth:`set_axis_labels`.
        """
        self.set_axis_labels(*axlabels)
        self.tight_layout()
def facet_axis(self, row_i, col_j, modify_state=True):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
if modify_state:
plt.sca(ax)
return ax
    def despine(self, **kwargs):
        """Remove axis spines from the facets.

        Keyword arguments are passed through to :func:`utils.despine`;
        returns ``self`` for method chaining.
        """
        utils.despine(self._figure, **kwargs)
        return self
    def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
        """Set axis labels on the left column and bottom row of the grid.

        A non-None label is also remembered on the grid (``_x_var`` /
        ``_y_var``) so later relabeling calls can reuse it.
        """
        if x_var is not None:
            self._x_var = x_var
            self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
        if y_var is not None:
            self._y_var = y_var
            self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
        return self
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
if clear_inner:
for ax in self._not_bottom_axes:
ax.set_xlabel("")
return self
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
if clear_inner:
for ax in self._not_left_axes:
ax.set_ylabel("")
return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_xticks()
ax.set_xticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
curr_labels = curr_labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(curr_labels, **kwargs)
else:
ax.set_xticklabels(labels, **kwargs)
return self
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_yticks()
ax.set_yticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(curr_labels, **kwargs)
else:
ax.set_yticklabels(labels, **kwargs)
return self
    def set_titles(self, template=None, row_template=None, col_template=None,
                   **kwargs):
        """Draw titles either above each facet or on the grid margins.

        Parameters
        ----------
        template : string
            Template for all titles with the formatting keys {col_var} and
            {col_name} (if using a `col` faceting variable) and/or {row_var}
            and {row_name} (if using a `row` faceting variable).
        row_template:
            Template for the row variable when titles are drawn on the grid
            margins. Must have {row_var} and {row_name} formatting keys.
        col_template:
            Template for the column variable when titles are drawn on the grid
            margins. Must have {col_var} and {col_name} formatting keys.

        Returns
        -------
        self: object
            Returns self.

        """
        args = dict(row_var=self._row_var, col_var=self._col_var)
        # Default the title size to the axes label size
        kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])

        # Establish default templates
        if row_template is None:
            row_template = "{row_var} = {row_name}"
        if col_template is None:
            col_template = "{col_var} = {col_name}"
        if template is None:
            if self._row_var is None:
                template = col_template
            elif self._col_var is None:
                template = row_template
            else:
                template = " | ".join([row_template, col_template])

        row_template = utils.to_utf8(row_template)
        col_template = utils.to_utf8(col_template)
        template = utils.to_utf8(template)

        if self._margin_titles:

            # Remove any existing title texts
            for text in self._margin_titles_texts:
                text.remove()
            self._margin_titles_texts = []

            if self.row_names is not None:
                # Draw the row titles on the right edge of the grid
                for i, row_name in enumerate(self.row_names):
                    ax = self.axes[i, -1]
                    args.update(dict(row_name=row_name))
                    title = row_template.format(**args)
                    text = ax.annotate(
                        title, xy=(1.02, .5), xycoords="axes fraction",
                        rotation=270, ha="left", va="center",
                        **kwargs
                    )
                    self._margin_titles_texts.append(text)

            if self.col_names is not None:
                # Draw the column titles as normal titles
                for j, col_name in enumerate(self.col_names):
                    args.update(dict(col_name=col_name))
                    title = col_template.format(**args)
                    self.axes[0, j].set_title(title, **kwargs)

            return self

        # Otherwise title each facet with all the necessary information
        if (self._row_var is not None) and (self._col_var is not None):
            for i, row_name in enumerate(self.row_names):
                for j, col_name in enumerate(self.col_names):
                    args.update(dict(row_name=row_name, col_name=col_name))
                    title = template.format(**args)
                    self.axes[i, j].set_title(title, **kwargs)
        elif self.row_names is not None and len(self.row_names):
            for i, row_name in enumerate(self.row_names):
                args.update(dict(row_name=row_name))
                title = template.format(**args)
                self.axes[i, 0].set_title(title, **kwargs)
        elif self.col_names is not None and len(self.col_names):
            for i, col_name in enumerate(self.col_names):
                args.update(dict(col_name=col_name))
                title = template.format(**args)
                # Index the flat array so col_wrap works
                self.axes.flat[i].set_title(title, **kwargs)
        return self
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
"""Add a reference line(s) to each facet.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s). Pass ``color=None`` to
use ``hue`` mapping.
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`FacetGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
self.map(plt.axvline, x=x, **line_kws)
if y is not None:
self.map(plt.axhline, y=y, **line_kws)
return self
# ------ Properties that are part of the public API and documented by Sphinx
def axes(self):
    """An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
    grid_axes = self._axes
    return grid_axes
def ax(self):
    """The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
    # Only meaningful for a 1x1 grid; otherwise point the caller at `.axes`
    if self.axes.shape != (1, 1):
        raise AttributeError(
            "Use the `.axes` attribute when facet variables are assigned."
        )
    return self.axes[0, 0]
def axes_dict(self):
    """A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.

    With a single facet dimension (``row`` or ``col``), each key is a string
    level of that variable; with both dimensions assigned, each key is a
    ``({row_level}, {col_level})`` tuple.
    """
    mapping = self._axes_dict
    return mapping
# ------ Private properties, that require some computation to get
def _inner_axes(self):
    """Return a flat array of the inner axes."""
    if self._col_wrap is None:
        # Full grid: everything except the left column and bottom row
        return self.axes[:-1, 1:].flat
    # With col_wrap, self.axes is a flat sequence; keep positions that are
    # neither in the left column nor above an empty bottom-row slot.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    inner = [
        ax
        for i, ax in enumerate(self.axes)
        if i % self._ncol
        and i < last_row_start
        and i < last_row_start - n_empty
    ]
    return np.array(inner, object).flat
def _left_axes(self):
    """Return a flat array of the left column of axes."""
    if self._col_wrap is None:
        return self.axes[:, 0].flat
    # Wrapped grid: positions at a multiple of the column count are leftmost
    left = [ax for i, ax in enumerate(self.axes) if i % self._ncol == 0]
    return np.array(left, object).flat
def _not_left_axes(self):
    """Return a flat array of axes that aren't on the left column."""
    if self._col_wrap is None:
        return self.axes[:, 1:].flat
    # Wrapped grid: any position with a nonzero column offset
    others = [ax for i, ax in enumerate(self.axes) if i % self._ncol]
    return np.array(others, object).flat
def _bottom_axes(self):
    """Return a flat array of the bottom row of axes."""
    if self._col_wrap is None:
        return self.axes[-1, :].flat
    # Wrapped grid: the true bottom row, plus axes sitting above empty slots
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    bottom = [
        ax
        for i, ax in enumerate(self.axes)
        if i >= last_row_start or i >= last_row_start - n_empty
    ]
    return np.array(bottom, object).flat
def _not_bottom_axes(self):
    """Return a flat array of axes that aren't on the bottom row."""
    if self._col_wrap is None:
        return self.axes[:-1, :].flat
    # Wrapped grid: exclude the bottom row and axes above empty slots
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    upper = [
        ax
        for i, ax in enumerate(self.axes)
        if i < last_row_start and i < last_row_start - n_empty
    ]
    return np.array(upper, object).flat
def lmplot(
    data=None, *,
    x=None, y=None, hue=None, col=None, row=None,
    palette=None, col_wrap=None, height=5, aspect=1, markers="o",
    sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,
    legend=True, legend_out=None, x_estimator=None, x_bins=None,
    x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
    units=None, seed=None, order=1, logistic=False, lowess=False,
    robust=False, logx=False, x_partial=None, y_partial=None,
    truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,
    line_kws=None, facet_kws=None,
):
    """Draw a `regplot` on each facet of a :class:`FacetGrid`.

    Builds a :class:`FacetGrid` from ``data`` faceted by ``row``/``col`` (and
    colored by ``hue``), draws a regression plot on each facet, labels the
    axes, and optionally adds a legend.

    ``sharex``, ``sharey``, and ``legend_out`` are deprecated in this
    signature; when passed, a ``UserWarning`` is emitted and the value is
    forwarded into ``facet_kws``.

    Returns
    -------
    :class:`FacetGrid`
        The grid with the regression plots drawn on it.

    Raises
    ------
    TypeError
        If ``data`` is not provided.
    ValueError
        If ``markers`` is a list whose length does not match the number of
        ``hue`` levels.
    """
    if facet_kws is None:
        facet_kws = {}

    def facet_kw_deprecation(key, val):
        # Warn about a deprecated top-level parameter and forward its value
        # into facet_kws (only when the caller actually passed one).
        msg = (
            f"{key} is deprecated from the `lmplot` function signature. "
            "Please update your code to pass it using `facet_kws`."
        )
        if val is not None:
            warnings.warn(msg, UserWarning)
            facet_kws[key] = val

    facet_kw_deprecation("sharex", sharex)
    facet_kw_deprecation("sharey", sharey)
    facet_kw_deprecation("legend_out", legend_out)

    if data is None:
        raise TypeError("Missing required keyword argument `data`.")

    # Reduce the dataframe to only needed columns
    # (np.unique also deduplicates names used for more than one role)
    need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
    cols = np.unique([a for a in need_cols if a is not None]).tolist()
    data = data[cols]

    # Initialize the grid
    facets = FacetGrid(
        data, row=row, col=col, hue=hue,
        palette=palette,
        row_order=row_order, col_order=col_order, hue_order=hue_order,
        height=height, aspect=aspect, col_wrap=col_wrap,
        **facet_kws,
    )

    # Add the markers here as FacetGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if facets.hue_names is None:
        n_markers = 1
    else:
        n_markers = len(facets.hue_names)
    # A scalar marker is broadcast to every hue level
    if not isinstance(markers, list):
        markers = [markers] * n_markers
    if len(markers) != n_markers:
        raise ValueError("markers must be a singleton or a list of markers "
                         "for each level of the hue variable")
    facets.hue_kws = {"marker": markers}

    def update_datalim(data, x, y, ax, **kws):
        # Seed the x data limits from the raw points; y limits are left to
        # autoscaling (updatey=False / scaley=False).
        xys = data[[x, y]].to_numpy().astype(float)
        ax.update_datalim(xys, updatey=False)
        ax.autoscale_view(scaley=False)

    facets.map_dataframe(update_datalim, x=x, y=y)

    # Draw the regression plot on each facet
    regplot_kws = dict(
        x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
        scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
        seed=seed, order=order, logistic=logistic, lowess=lowess,
        robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,
        truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,
        scatter_kws=scatter_kws, line_kws=line_kws,
    )
    facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)
    facets.set_axis_labels(x, y)

    # Add a legend
    if legend and (hue is not None) and (hue not in [col, row]):
        facets.add_legend()
    return facets
171,809 | import numbers
import numpy as np
import warnings
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
    """Two-stage resampling: draw units, then observations within units."""
    levels = np.unique(units)
    n_levels = len(levels)
    # Split every input array into its per-unit chunks once, up front
    grouped = [[a[units == level] for level in levels] for a in args]
    stats = []
    for _ in range(int(n_boot)):
        # First stage: sample whole units with replacement
        unit_idx = integers(0, n_levels, n_levels, dtype=np.intp)
        drawn = [[chunks[j] for j in unit_idx] for chunks in grouped]
        # Second stage: resample observations inside each drawn unit
        # (index draws are shared across the input arrays)
        obs_idx = [
            integers(0, size, size, dtype=np.intp)
            for size in map(len, drawn[0])
        ]
        resampled = [
            np.concatenate([chunk.take(r, axis=0) for chunk, r in zip(chunks, obs_idx)])
            for chunks in drawn
        ]
        stats.append(func(*resampled, **func_kwargs))
    return np.array(stats)
def _handle_random_seed(seed=None):
    """Given a seed in one of many formats, return a random number generator.

    Generalizes across the numpy 1.17 changes, preferring newer functionality.
    """
    # An existing RandomState passes straight through
    if isinstance(seed, np.random.RandomState):
        return seed
    try:
        # General interface for seeding on numpy >= 1.17
        return np.random.default_rng(seed)
    except AttributeError:
        pass
    # We are on numpy < 1.17, handle options ourselves
    if seed is None:
        return np.random.RandomState()
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    err = "{} cannot be used to seed the random number generator"
    raise ValueError(err.format(seed))
The provided code snippet includes necessary dependencies for implementing the `bootstrap` function. Write a Python function `def bootstrap(*args, **kwargs)` to solve the following problem:
Resample one or more arrays with replacement and store aggregate values. Positional arguments are a sequence of arrays to bootstrap along the first axis and pass to a summary function. Keyword arguments: n_boot : int, default=10000 Number of iterations axis : int, default=None Will pass axis to ``func`` as a keyword argument. units : array, default=None Array of sampling unit IDs. When used the bootstrap resamples units and then observations within units instead of individual datapoints. func : string or callable, default="mean" Function to call on the args that are passed in. If string, uses as name of function in the numpy namespace. If nans are present in the data, will try to use nan-aware version of named function. seed : Generator | SeedSequence | RandomState | int | None Seed for the random number generator; useful if you want reproducible resamples. Returns ------- boot_dist: array array of bootstrapped statistic values
Here is the function:
def bootstrap(*args, **kwargs):
    """Resample one or more arrays with replacement and store aggregate values.

    Positional arguments are a sequence of arrays to bootstrap along the first
    axis and pass to a summary function.

    Keyword arguments:
        n_boot : int, default=10000
            Number of iterations
        axis : int, default=None
            Will pass axis to ``func`` as a keyword argument.
        units : array, default=None
            Array of sampling unit IDs. When used the bootstrap resamples
            units and then observations within units instead of individual
            datapoints.
        func : string or callable, default="mean"
            Function to call on the args that are passed in. If string, uses
            as name of function in the numpy namespace. If nans are present
            in the data, will try to use nan-aware version of named function.
        seed : Generator | SeedSequence | RandomState | int | None
            Seed for the random number generator; useful if you want
            reproducible resamples.

    Returns
    -------
    boot_dist: array
        array of bootstrapped statistic values

    """
    # All inputs must be resampled with a common index, so lengths must agree
    if len({len(a) for a in args}) > 1:
        raise ValueError("All input arrays must have the same length")
    n = len(args[0])

    # Default keyword arguments
    n_boot = kwargs.get("n_boot", 10000)
    func = kwargs.get("func", "mean")
    axis = kwargs.get("axis", None)
    units = kwargs.get("units", None)

    # Support the deprecated `random_seed` spelling, with a warning
    random_seed = kwargs.get("random_seed", None)
    if random_seed is not None:
        warnings.warn("`random_seed` has been renamed to `seed` and will be removed")
    seed = kwargs.get("seed", random_seed)

    func_kwargs = {} if axis is None else {"axis": axis}

    # Initialize the resampler
    rng = _handle_random_seed(seed)

    # Coerce to arrays
    args = [np.asarray(a) for a in args]
    if units is not None:
        units = np.asarray(units)

    if not isinstance(func, str):
        f = func
    else:
        # Allow named numpy functions
        f = getattr(np, func)
        # Prefer the nan-aware variant when the data contain nans
        has_nan = np.isnan(np.sum(np.column_stack(args)))
        if has_nan and not func.startswith("nan"):
            nan_func = getattr(np, f"nan{func}", None)
            if nan_func is None:
                msg = f"Data contain nans but no nan-aware version of `{func}` found"
                warnings.warn(msg, UserWarning)
            else:
                f = nan_func

    # Handle numpy changes: Generator.integers vs RandomState.randint
    integers = getattr(rng, "integers", None)
    if integers is None:
        integers = rng.randint

    # Do the bootstrap
    if units is not None:
        return _structured_bootstrap(args, n_boot, units, f,
                                     func_kwargs, integers)

    boot_dist = []
    for _ in range(int(n_boot)):
        idx = integers(0, n, n, dtype=np.intp)  # intp is indexing dtype
        boot_dist.append(f(*(a.take(idx, axis=0) for a in args), **func_kwargs))
    return np.array(boot_dist)
171,810 | from inspect import signature
def signature(obj: Callable[..., Any], *, follow_wrapped: bool = ...) -> Signature: ...
The provided code snippet includes necessary dependencies for implementing the `share_init_params_with_map` function. Write a Python function `def share_init_params_with_map(cls)` to solve the following problem:
Make cls.map a classmethod with same signature as cls.__init__.
Here is the function:
def share_init_params_with_map(cls):
    """Make cls.map a classmethod with same signature as cls.__init__."""
    map_signature = signature(cls.map)
    init_params = signature(cls.__init__).parameters
    # Keep map's `cls` parameter, then splice in __init__'s (minus `self`)
    params = [map_signature.parameters["cls"]]
    params.extend(p for name, p in init_params.items() if name != "self")
    cls.map.__signature__ = map_signature.replace(parameters=params)
    cls.map.__doc__ = cls.__init__.__doc__
    cls.map = classmethod(cls.map)
    return cls
171,811 | import warnings
import itertools
from copy import copy
from functools import partial
from collections import UserString
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .external.version import Version
from .palettes import (
QUAL_PALETTES,
color_palette,
)
from .utils import (
_check_argument,
get_color_cycle,
remove_na,
)
def variable_type(vector, boolean_type="numeric"):
    """
    Determine whether a vector contains numeric, categorical, or datetime data.

    This function differs from the pandas typing API in two ways:

    - Python sequences or object-typed PyData objects are considered numeric if
      all of their entries are numeric.
    - String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.

    Parameters
    ----------
    vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
        Input data to test.
    boolean_type : 'numeric' or 'categorical'
        Type to use for vectors containing only 0s and 1s (and NAs).

    Returns
    -------
    var_type : 'numeric', 'categorical', or 'datetime'
        Name identifying the type of data in the vector.
    """
    # An explicit categorical dtype always wins
    if pd.api.types.is_categorical_dtype(vector):
        return VariableType("categorical")

    # All-null data (including empty input) is treated as numeric
    if pd.isna(vector).all():
        return VariableType("numeric")

    # Binary/boolean data: the caller decides how to treat it. Comparing
    # object or datetime vectors against [0, 1, nan] can emit spurious numpy
    # warnings (numpy GH-6784, GH-13548); since we reduce with .all() they are
    # harmless, so silence them here.
    with warnings.catch_warnings():
        warnings.simplefilter(
            action='ignore', category=(FutureWarning, DeprecationWarning)
        )
        if np.isin(vector, [0, 1, np.nan]).all():
            return VariableType(boolean_type)

    # Defer to positive pandas dtype tests
    if pd.api.types.is_numeric_dtype(vector):
        return VariableType("numeric")
    if pd.api.types.is_datetime64_dtype(vector):
        return VariableType("datetime")

    # No dtype-level answer: fall back to inspecting individual entries
    if all(isinstance(item, Number) for item in vector):
        return VariableType("numeric")
    if all(isinstance(item, (datetime, np.datetime64)) for item in vector):
        return VariableType("datetime")

    # Final fallback: treat as categorical
    return VariableType("categorical")
The provided code snippet includes necessary dependencies for implementing the `infer_orient` function. Write a Python function `def infer_orient(x=None, y=None, orient=None, require_numeric=True)` to solve the following problem:
Determine how the plot should be oriented based on the data. For historical reasons, the convention is to call a plot "horizontally" or "vertically" oriented based on the axis representing its dependent variable. Practically, this is used when determining the axis for numerical aggregation. Parameters ---------- x, y : Vector data or None Positional data vectors for the plot. orient : string or None Specified orientation, which must start with "v" or "h" if not None. require_numeric : bool If set, raise when the implied dependent variable is not numeric. Returns ------- orient : "v" or "h" Raises ------ ValueError: When `orient` is not None and does not start with "h" or "v" TypeError: When dependent variable is not numeric, with `require_numeric`
Here is the function:
def infer_orient(x=None, y=None, orient=None, require_numeric=True):
    """Determine how the plot should be oriented based on the data.

    For historical reasons, the convention is to call a plot "horizontally"
    or "vertically" oriented based on the axis representing its dependent
    variable. Practically, this is used when determining the axis for
    numerical aggregation.

    Parameters
    ----------
    x, y : Vector data or None
        Positional data vectors for the plot.
    orient : string or None
        Specified orientation, which must start with "v" or "h" if not None.
    require_numeric : bool
        If set, raise when the implied dependent variable is not numeric.

    Returns
    -------
    orient : "v" or "h"

    Raises
    ------
    ValueError: When `orient` is not None and does not start with "h" or "v"
    TypeError: When dependent variable is not numeric, with `require_numeric`

    """
    x_type = None if x is None else variable_type(x)
    y_type = None if y is None else variable_type(y)

    nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
    single_var_warning = "{} orientation ignored with only `{}` specified."

    orient_str = str(orient)

    # Single-variable cases: the orientation is forced by which one exists
    if x is None:
        if orient_str.startswith("h"):
            warnings.warn(single_var_warning.format("Horizontal", "y"))
        if require_numeric and y_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
        return "v"

    if y is None:
        if orient_str.startswith("v"):
            warnings.warn(single_var_warning.format("Vertical", "x"))
        if require_numeric and x_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
        return "h"

    # Both variables present: honor an explicit orientation request
    if orient_str.startswith("v"):
        if require_numeric and y_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
        return "v"

    if orient_str.startswith("h"):
        if require_numeric and x_type != "numeric":
            raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
        return "h"

    if orient is not None:
        err = (
            "`orient` must start with 'v' or 'h' or be None, "
            f"but `{repr(orient)}` was passed."
        )
        raise ValueError(err)

    # No request: infer from the variable types, preferring the numeric axis
    # as the dependent variable
    if x_type != "categorical" and y_type == "categorical":
        return "h"
    if x_type != "numeric" and y_type == "numeric":
        return "v"
    if x_type == "numeric" and y_type != "numeric":
        return "h"
    if require_numeric and "numeric" not in (x_type, y_type):
        err = "Neither the `x` nor `y` variable appears to be numeric."
        raise TypeError(err)
    return "v"
171,812 | import warnings
import itertools
from copy import copy
from functools import partial
from collections import UserString
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .external.version import Version
from .palettes import (
QUAL_PALETTES,
color_palette,
)
from .utils import (
_check_argument,
get_color_cycle,
remove_na,
)
The provided code snippet includes necessary dependencies for implementing the `unique_dashes` function. Write a Python function `def unique_dashes(n)` to solve the following problem:
Build an arbitrarily long list of unique dash styles for lines. Parameters ---------- n : int Number of unique dash specs to generate. Returns ------- dashes : list of strings or tuples Valid arguments for the ``dashes`` parameter on :class:`matplotlib.lines.Line2D`. The first spec is a solid line (``""``), the remainder are sequences of long and short dashes.
Here is the function:
def unique_dashes(n):
    """Build an arbitrarily long list of unique dash styles for lines.

    Parameters
    ----------
    n : int
        Number of unique dash specs to generate.

    Returns
    -------
    dashes : list of strings or tuples
        Valid arguments for the ``dashes`` parameter on
        :class:`matplotlib.lines.Line2D`. The first spec is a solid
        line (``""``), the remainder are sequences of long and short
        dashes.

    """
    # Hand-picked, easily distinguishable specs come first
    dashes = [
        "",
        (4, 1.5),
        (1, 1),
        (3, 1.25, 1.5, 1.25),
        (5, 1, 1, 1),
    ]
    # Generate more programmatically from long/short dash combinations
    p = 3
    while len(dashes) < n:
        long_short_a = list(itertools.combinations_with_replacement([3, 1.25], p))
        long_short_b = list(itertools.combinations_with_replacement([4, 1], p))
        # Interleave the two streams, reversing one of them and dropping
        # the first and last combination from each
        segment_list = itertools.chain(
            *zip(long_short_a[1:-1][::-1], long_short_b[1:-1])
        )
        # Insert a gap (the shortest segment length) after every dash
        for segments in segment_list:
            gap = min(segments)
            dashes.append(tuple(itertools.chain(*((seg, gap) for seg in segments))))
        p += 1
    return dashes[:n]
171,813 | import warnings
import itertools
from copy import copy
from functools import partial
from collections import UserString
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .external.version import Version
from .palettes import (
QUAL_PALETTES,
color_palette,
)
from .utils import (
_check_argument,
get_color_cycle,
remove_na,
)
The provided code snippet includes necessary dependencies for implementing the `unique_markers` function. Write a Python function `def unique_markers(n)` to solve the following problem:
Build an arbitrarily long list of unique marker styles for points. Parameters ---------- n : int Number of unique marker specs to generate. Returns ------- markers : list of string or tuples Values for defining :class:`matplotlib.markers.MarkerStyle` objects. All markers will be filled.
Here is the function:
def unique_markers(n):
    """Build an arbitrarily long list of unique marker styles for points.

    Parameters
    ----------
    n : int
        Number of unique marker specs to generate.

    Returns
    -------
    markers : list of string or tuples
        Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
        All markers will be filled.

    """
    # Hand-picked, easily distinguishable specs come first
    markers = [
        "o",
        "X",
        (4, 0, 45),
        "P",
        (4, 0, 0),
        (4, 1, 0),
        "^",
        (4, 1, 45),
        "v",
    ]
    # Generate more from regular polygons of increasing side count
    n_sides = 5
    while len(markers) < n:
        angle = 360 / (n_sides + 1) / 2
        markers += [
            (n_sides + 1, 1, angle),
            (n_sides + 1, 0, angle),
            (n_sides, 1, 0),
            (n_sides, 0, 0),
        ]
        n_sides += 1
    # Only return exactly what we need
    return markers[:n]
171,814 | import warnings
import itertools
from copy import copy
from functools import partial
from collections import UserString
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .external.version import Version
from .palettes import (
QUAL_PALETTES,
color_palette,
)
from .utils import (
_check_argument,
get_color_cycle,
remove_na,
)
def variable_type(vector, boolean_type="numeric"):
    """
    Determine whether a vector contains numeric, categorical, or datetime data.

    This function differs from the pandas typing API in two ways:

    - Python sequences or object-typed PyData objects are considered numeric if
      all of their entries are numeric.
    - String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.

    Parameters
    ----------
    vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
        Input data to test.
    boolean_type : 'numeric' or 'categorical'
        Type to use for vectors containing only 0s and 1s (and NAs).

    Returns
    -------
    var_type : 'numeric', 'categorical', or 'datetime'
        Name identifying the type of data in the vector.
    """
    # If a categorical dtype is set, infer categorical
    if pd.api.types.is_categorical_dtype(vector):
        return VariableType("categorical")

    # Special-case all-na data, which is always "numeric"
    # (this also covers empty input, for which .all() is vacuously True)
    if pd.isna(vector).all():
        return VariableType("numeric")

    # Special-case binary/boolean data, allow caller to determine
    # This triggers a numpy warning when vector has strings/objects
    # https://github.com/numpy/numpy/issues/6784
    # Because we reduce with .all(), we are agnostic about whether the
    # comparison returns a scalar or vector, so we will ignore the warning.
    # It triggers a separate DeprecationWarning when the vector has datetimes:
    # https://github.com/numpy/numpy/issues/13548
    # This is considered a bug by numpy and will likely go away.
    with warnings.catch_warnings():
        warnings.simplefilter(
            action='ignore', category=(FutureWarning, DeprecationWarning)
        )
        if np.isin(vector, [0, 1, np.nan]).all():
            return VariableType(boolean_type)

    # Defer to positive pandas tests
    if pd.api.types.is_numeric_dtype(vector):
        return VariableType("numeric")

    if pd.api.types.is_datetime64_dtype(vector):
        return VariableType("datetime")

    # --- If we get to here, we need to check the entries

    # Check for a collection where everything is a number
    def all_numeric(x):
        for x_i in x:
            if not isinstance(x_i, Number):
                return False
        return True

    if all_numeric(vector):
        return VariableType("numeric")

    # Check for a collection where everything is a datetime
    def all_datetime(x):
        for x_i in x:
            if not isinstance(x_i, (datetime, np.datetime64)):
                return False
        return True

    if all_datetime(vector):
        return VariableType("datetime")

    # Otherwise, our final fallback is to consider things categorical
    return VariableType("categorical")
The provided code snippet includes necessary dependencies for implementing the `categorical_order` function. Write a Python function `def categorical_order(vector, order=None)` to solve the following problem:
Return a list of unique data values. Determine an ordered list of levels in ``values``. Parameters ---------- vector : list, array, Categorical, or Series Vector of "categorical" values order : list-like, optional Desired order of category levels to override the order determined from the ``values`` object. Returns ------- order : list Ordered list of category levels not including null values.
Here is the function:
def categorical_order(vector, order=None):
    """Return a list of unique data values.

    Determine an ordered list of levels in ``values``.

    Parameters
    ----------
    vector : list, array, Categorical, or Series
        Vector of "categorical" values
    order : list-like, optional
        Desired order of category levels to override the order determined
        from the ``values`` object.

    Returns
    -------
    order : list
        Ordered list of category levels not including null values.

    """
    if order is None:
        # No explicit order: derive the levels from the data itself
        if hasattr(vector, "categories"):
            levels = vector.categories
        else:
            try:
                levels = vector.cat.categories
            except (TypeError, AttributeError):
                try:
                    levels = vector.unique()
                except AttributeError:
                    levels = pd.unique(vector)
                # Numeric levels appear in sorted order
                if variable_type(vector) == "numeric":
                    levels = np.sort(levels)
        # Null values are dropped only from derived (not caller-given) orders
        order = filter(pd.notnull, levels)
    return list(order)
171,815 | import functools
import matplotlib as mpl
from cycler import cycler
from . import palettes
The provided code snippet includes necessary dependencies for implementing the `reset_defaults` function. Write a Python function `def reset_defaults()` to solve the following problem:
Restore all RC params to default settings.
Here is the function:
def reset_defaults():
    """Restore all RC params to default settings."""
    # matplotlib keeps its pristine defaults in rcParamsDefault
    defaults = mpl.rcParamsDefault
    mpl.rcParams.update(defaults)
171,816 | import functools
import matplotlib as mpl
from cycler import cycler
from . import palettes
The provided code snippet includes necessary dependencies for implementing the `reset_orig` function. Write a Python function `def reset_orig()` to solve the following problem:
Restore all RC params to original settings (respects custom rc).
Here is the function:
def reset_orig():
    """Restore all RC params to original settings (respects custom rc)."""
    # _orig_rc_params is captured at package import time, before any
    # seaborn styling is applied
    from . import _orig_rc_params
    mpl.rcParams.update(_orig_rc_params)
171,817 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# Interactive palette widgets need the optional ipywidgets dependency;
# fall back to a stub that raises an informative error when it is missing.
try:
    from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
    def interact(f):
        # NOTE(review): FloatSlider/IntSlider remain undefined on this
        # fallback path; code referencing them will raise NameError.
        msg = "Interactive palettes require `ipywidgets`, which is not installed."
        raise ImportError(msg)
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
def _init_mutable_colormap():
    """Create a matplotlib colormap whose LUT the widgets can overwrite."""
    base_colors = color_palette("Greys", 256)
    cmap = LinearSegmentedColormap.from_list("interactive", base_colors)
    # Force the lookup table to be allocated now so it can be edited in place
    cmap._init()
    cmap._set_extremes()
    return cmap
def _update_lut(cmap, colors):
    """Overwrite the first 256 LUT entries of a matplotlib colormap in place."""
    lut = cmap._lut
    lut[:256] = colors
    # Recompute the over/under/bad entries from the new table
    cmap._set_extremes()
def _show_cmap(cmap):
    """Render a continuous matplotlib colormap as a thin horizontal strip."""
    from .rcmod import axes_style  # Avoid circular import
    with axes_style("white"):
        fig, ax = plt.subplots(figsize=(8.25, .75))
        ax.set(xticks=[], yticks=[])
        gradient = np.linspace(0, 1, 256)[np.newaxis, :]
        ax.pcolormesh(gradient, cmap=cmap)
def palplot(pal, size=1):
    """Plot the values in a color palette as a horizontal array.

    Parameters
    ----------
    pal : sequence of matplotlib colors
        colors, i.e. as returned by seaborn.color_palette()
    size :
        scaling factor for size of plot

    """
    n_colors = len(pal)
    fig, ax = plt.subplots(1, 1, figsize=(n_colors * size, size))
    swatch_indices = np.arange(n_colors).reshape(1, n_colors)
    ax.imshow(
        swatch_indices,
        cmap=mpl.colors.ListedColormap(list(pal)),
        interpolation="nearest",
        aspect="auto",
    )
    # Put x ticks between the swatches so the borders read cleanly
    ax.set_xticks(np.arange(n_colors) - .5)
    ax.set_yticks([-.5, .5])
    # Hide the x tick labels
    ax.set_xticklabels([""] * n_colors)
    # The proper way to remove y ticks entirely
    ax.yaxis.set_major_locator(ticker.NullLocator())
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
    """Return a list of colors or continuous colormap defining a palette.

    Possible ``palette`` values include:
        - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
        - Name of matplotlib colormap
        - 'husl' or 'hls'
        - 'ch:<cubehelix arguments>'
        - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
        - A sequence of colors in any format matplotlib accepts

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle.

    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.

    See the :ref:`tutorial <palette_tutorial>` for more information.

    Parameters
    ----------
    palette : None, string, or sequence, optional
        Name of palette or None to return current palette. If a sequence, input
        colors are used but possibly cycled and desaturated.
    n_colors : int, optional
        Number of colors in the palette. If ``None``, the default will depend
        on how ``palette`` is specified. Named palettes default to 6 colors,
        but grabbing the current palette or passing in a list of colors will
        not change the number of colors unless this is specified. Asking for
        more colors than exist in the palette will cause it to cycle. Ignored
        when ``as_cmap`` is True.
    desat : float, optional
        Proportion to desaturate each color by.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    set_palette : Set the default color cycle for all plots.
    set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
                      colors from one of the seaborn palettes.

    Examples
    --------
    .. include:: ../docstrings/color_palette.rst

    """
    if palette is None:
        # No palette given: use whatever the active mpl color cycle is.
        palette = get_color_cycle()
        if n_colors is None:
            n_colors = len(palette)

    elif not isinstance(palette, str):
        # A sequence of colors was passed through directly.
        palette = palette
        if n_colors is None:
            n_colors = len(palette)
    else:
        if n_colors is None:
            # Use all colors in a qualitative palette or 6 of another kind
            n_colors = QUAL_PALETTE_SIZES.get(palette, 6)

        # Dispatch on the string form of the palette specification.
        if palette in SEABORN_PALETTES:
            # Named "seaborn variant" of matplotlib default color cycle
            palette = SEABORN_PALETTES[palette]

        elif palette == "hls":
            # Evenly spaced colors in cylindrical RGB space
            palette = hls_palette(n_colors, as_cmap=as_cmap)

        elif palette == "husl":
            # Evenly spaced colors in cylindrical Lab space
            palette = husl_palette(n_colors, as_cmap=as_cmap)

        elif palette.lower() == "jet":
            # Paternalism
            raise ValueError("No.")

        elif palette.startswith("ch:"):
            # Cubehelix palette with params specified in string
            args, kwargs = _parse_cubehelix_args(palette)
            palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)

        elif palette.startswith("light:"):
            # light palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)

        elif palette.startswith("dark:"):
            # dark palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)

        elif palette.startswith("blend:"):
            # blend palette between colors specified in string
            _, colors = palette.split(":")
            colors = colors.split(",")
            palette = blend_palette(colors, n_colors, as_cmap=as_cmap)

        else:
            try:
                # Perhaps a named matplotlib colormap?
                palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
            except (ValueError, KeyError):  # Error class changed in mpl36
                raise ValueError(f"{palette!r} is not a valid palette name")

    if desat is not None:
        palette = [desaturate(c, desat) for c in palette]

    if not as_cmap:

        # Always return as many colors as we asked for
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors)]

        # Always return in r, g, b tuple format
        try:
            palette = map(mpl.colors.colorConverter.to_rgb, palette)
            palette = _ColorPalette(palette)
        except ValueError:
            raise ValueError(f"Could not generate a palette for {palette}")

    return palette
The provided code snippet includes necessary dependencies for implementing the `choose_colorbrewer_palette` function. Write a Python function `def choose_colorbrewer_palette(data_type, as_cmap=False)` to solve the following problem:
Select a palette from the ColorBrewer set. These palettes are built into matplotlib and can be used by name in many seaborn functions, or by passing the object returned by this function. Parameters ---------- data_type : {'sequential', 'diverging', 'qualitative'} This describes the kind of data you want to visualize. See the seaborn color palette docs for more information about how to choose this value. Note that you can pass substrings (e.g. 'q' for 'qualitative. as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- dark_palette : Create a sequential palette with dark low values. light_palette : Create a sequential palette with bright low values. diverging_palette : Create a diverging palette from selected colors. cubehelix_palette : Create a sequential palette or colormap using the cubehelix system.
Here is the function:
def choose_colorbrewer_palette(data_type, as_cmap=False):
    """Select a palette from the ColorBrewer set.

    These palettes are built into matplotlib and can be used by name in
    many seaborn functions, or by passing the object returned by this function.

    Parameters
    ----------
    data_type : {'sequential', 'diverging', 'qualitative'}
        This describes the kind of data you want to visualize. See the seaborn
        color palette docs for more information about how to choose this value.
        Note that you can pass substrings (e.g. 'q' for 'qualitative').
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    dark_palette : Create a sequential palette with dark low values.
    light_palette : Create a sequential palette with bright low values.
    diverging_palette : Create a diverging palette from selected colors.
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.

    """
    if data_type.startswith("q") and as_cmap:
        raise ValueError("Qualitative palettes cannot be colormaps.")

    # `pal` is mutated in place by the widget callbacks so the object
    # returned here keeps tracking the current widget state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    if data_type.startswith("s"):
        opts = ["Greys", "Reds", "Greens", "Blues", "Oranges", "Purples",
                "BuGn", "BuPu", "GnBu", "OrRd", "PuBu", "PuRd", "RdPu", "YlGn",
                "PuBuGn", "YlGnBu", "YlOrBr", "YlOrRd"]
        variants = ["regular", "reverse", "dark"]

        @interact
        def choose_sequential(name=opts, n=(2, 18),
                              desat=FloatSlider(min=0, max=1, value=1),
                              variant=variants):
            if variant == "reverse":
                name += "_r"
            elif variant == "dark":
                name += "_d"

            if as_cmap:
                colors = color_palette(name, 256, desat)
                # Append an alpha column; the mutable LUT is RGBA.
                _update_lut(cmap, np.c_[colors, np.ones(256)])
                _show_cmap(cmap)
            else:
                pal[:] = color_palette(name, n, desat)
                palplot(pal)

    elif data_type.startswith("d"):
        opts = ["RdBu", "RdGy", "PRGn", "PiYG", "BrBG",
                "RdYlBu", "RdYlGn", "Spectral"]
        variants = ["regular", "reverse"]

        @interact
        def choose_diverging(name=opts, n=(2, 16),
                             desat=FloatSlider(min=0, max=1, value=1),
                             variant=variants):
            if variant == "reverse":
                name += "_r"
            if as_cmap:
                colors = color_palette(name, 256, desat)
                _update_lut(cmap, np.c_[colors, np.ones(256)])
                _show_cmap(cmap)
            else:
                pal[:] = color_palette(name, n, desat)
                palplot(pal)

    elif data_type.startswith("q"):
        opts = ["Set1", "Set2", "Set3", "Paired", "Accent",
                "Pastel1", "Pastel2", "Dark2"]

        @interact
        def choose_qualitative(name=opts, n=(2, 16),
                               desat=FloatSlider(min=0, max=1, value=1)):
            pal[:] = color_palette(name, n, desat)
            palplot(pal)

    if as_cmap:
        return cmap
    return pal
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
def interact(f):
msg = "Interactive palettes require `ipywidgets`, which is not installed."
raise ImportError(msg)
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def palplot(pal, size=1):
"""Plot the values in a color palette as a horizontal array.
Parameters
----------
pal : sequence of matplotlib colors
colors, i.e. as returned by seaborn.color_palette()
size :
scaling factor for size of plot
"""
n = len(pal)
f, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(pal)),
interpolation="nearest", aspect="auto")
ax.set_xticks(np.arange(n) - .5)
ax.set_yticks([-.5, .5])
# Ensure nice border between colors
ax.set_xticklabels(["" for _ in range(n)])
# The proper way to set no ticks
ax.yaxis.set_major_locator(ticker.NullLocator())
def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
    """Make a sequential palette that blends from dark to ``color``.

    Good for data that ranges from uninteresting low values to interesting
    high values. ``color`` may be given in any format matplotlib accepts,
    or as a tuple in the space named by ``input``. In the IPython notebook
    this palette can also be chosen interactively with
    :func:`choose_dark_palette`.

    Parameters
    ----------
    color : base color for high values
        hex, rgb-tuple, or html color name
    n_colors : int, optional
        number of colors in the palette
    reverse : bool, optional
        if True, reverse the direction of the blend
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    input : {'rgb', 'hls', 'husl', xkcd'}
        Color space to interpret the input color. The first three options
        apply to tuple inputs and the latter applies to string inputs.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    light_palette : Create a sequential palette with bright low values.
    diverging_palette : Create a diverging palette with two colors.

    Examples
    --------
    .. include:: ../docstrings/dark_palette.rst

    """
    base = _color_to_rgb(color, input)
    hue, sat, _ = husl.rgb_to_husl(*base)
    # Dark anchor: same hue as the seed, mostly desaturated, lightness 15.
    anchor = _color_to_rgb((hue, .15 * sat, 15), input="husl")
    if reverse:
        endpoints = [base, anchor]
    else:
        endpoints = [anchor, base]
    return blend_palette(endpoints, n_colors, as_cmap)
The provided code snippet includes necessary dependencies for implementing the `choose_dark_palette` function. Write a Python function `def choose_dark_palette(input="husl", as_cmap=False)` to solve the following problem:
Launch an interactive widget to create a dark sequential palette. This corresponds with the :func:`dark_palette` function. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. Requires IPython 2+ and must be used in the notebook. Parameters ---------- input : {'husl', 'hls', 'rgb'} Color space for defining the seed value. Note that the default is different than the default input for :func:`dark_palette`. as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- dark_palette : Create a sequential palette with dark low values. light_palette : Create a sequential palette with bright low values. cubehelix_palette : Create a sequential palette or colormap using the cubehelix system.
Here is the function:
def choose_dark_palette(input="husl", as_cmap=False):
    """Launch an interactive widget to create a dark sequential palette.

    This corresponds with the :func:`dark_palette` function. This kind
    of palette is good for data that range between relatively uninteresting
    low values and interesting high values.

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    input : {'husl', 'hls', 'rgb'}
        Color space for defining the seed value. Note that the default is
        different than the default input for :func:`dark_palette`.
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    dark_palette : Create a sequential palette with dark low values.
    light_palette : Create a sequential palette with bright low values.
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.

    """
    # `pal` is mutated in place by the widget callbacks so the returned
    # object keeps tracking the current widget state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    if input == "rgb":
        @interact
        def choose_dark_palette_rgb(r=(0., 1.),
                                    g=(0., 1.),
                                    b=(0., 1.),
                                    n=(3, 17)):
            color = r, g, b
            if as_cmap:
                colors = dark_palette(color, 256, input="rgb")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = dark_palette(color, n, input="rgb")
                palplot(pal)

    elif input == "hls":
        @interact
        def choose_dark_palette_hls(h=(0., 1.),
                                    l=(0., 1.),  # noqa: E741
                                    s=(0., 1.),
                                    n=(3, 17)):
            color = h, l, s
            if as_cmap:
                colors = dark_palette(color, 256, input="hls")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = dark_palette(color, n, input="hls")
                palplot(pal)

    elif input == "husl":
        @interact
        def choose_dark_palette_husl(h=(0, 359),
                                     s=(0, 99),
                                     l=(0, 99),  # noqa: E741
                                     n=(3, 17)):
            color = h, s, l
            if as_cmap:
                colors = dark_palette(color, 256, input="husl")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = dark_palette(color, n, input="husl")
                palplot(pal)

    if as_cmap:
        return cmap
    return pal
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
def interact(f):
msg = "Interactive palettes require `ipywidgets`, which is not installed."
raise ImportError(msg)
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def palplot(pal, size=1):
"""Plot the values in a color palette as a horizontal array.
Parameters
----------
pal : sequence of matplotlib colors
colors, i.e. as returned by seaborn.color_palette()
size :
scaling factor for size of plot
"""
n = len(pal)
f, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(pal)),
interpolation="nearest", aspect="auto")
ax.set_xticks(np.arange(n) - .5)
ax.set_yticks([-.5, .5])
# Ensure nice border between colors
ax.set_xticklabels(["" for _ in range(n)])
# The proper way to set no ticks
ax.yaxis.set_major_locator(ticker.NullLocator())
def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input="rgb"):
    """Make a sequential palette that blends from light to ``color``.

    ``color`` may be given in any format matplotlib accepts, or as a tuple
    in the space named by ``input``. In a Jupyter notebook this palette can
    also be chosen interactively with :func:`choose_light_palette`.

    Parameters
    ----------
    color : base color for high values
        hex code, html color name, or tuple in `input` space.
    n_colors : int, optional
        number of colors in the palette
    reverse : bool, optional
        if True, reverse the direction of the blend
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    input : {'rgb', 'hls', 'husl', xkcd'}
        Color space to interpret the input color. The first three options
        apply to tuple inputs and the latter applies to string inputs.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    dark_palette : Create a sequential palette with dark low values.
    diverging_palette : Create a diverging palette with two colors.

    Examples
    --------
    .. include:: ../docstrings/light_palette.rst

    """
    base = _color_to_rgb(color, input)
    hue, sat, _ = husl.rgb_to_husl(*base)
    # Light anchor: same hue as the seed, mostly desaturated, lightness 95.
    anchor = _color_to_rgb((hue, .15 * sat, 95), input="husl")
    if reverse:
        endpoints = [base, anchor]
    else:
        endpoints = [anchor, base]
    return blend_palette(endpoints, n_colors, as_cmap)
The provided code snippet includes necessary dependencies for implementing the `choose_light_palette` function. Write a Python function `def choose_light_palette(input="husl", as_cmap=False)` to solve the following problem:
Launch an interactive widget to create a light sequential palette. This corresponds with the :func:`light_palette` function. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. Requires IPython 2+ and must be used in the notebook. Parameters ---------- input : {'husl', 'hls', 'rgb'} Color space for defining the seed value. Note that the default is different than the default input for :func:`light_palette`. as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- light_palette : Create a sequential palette with bright low values. dark_palette : Create a sequential palette with dark low values. cubehelix_palette : Create a sequential palette or colormap using the cubehelix system.
Here is the function:
def choose_light_palette(input="husl", as_cmap=False):
    """Launch an interactive widget to create a light sequential palette.

    This corresponds with the :func:`light_palette` function. This kind
    of palette is good for data that range between relatively uninteresting
    low values and interesting high values.

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    input : {'husl', 'hls', 'rgb'}
        Color space for defining the seed value. Note that the default is
        different than the default input for :func:`light_palette`.
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    light_palette : Create a sequential palette with bright low values.
    dark_palette : Create a sequential palette with dark low values.
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.

    """
    # `pal` is mutated in place by the widget callbacks so the returned
    # object keeps tracking the current widget state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    if input == "rgb":
        @interact
        def choose_light_palette_rgb(r=(0., 1.),
                                     g=(0., 1.),
                                     b=(0., 1.),
                                     n=(3, 17)):
            color = r, g, b
            if as_cmap:
                colors = light_palette(color, 256, input="rgb")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = light_palette(color, n, input="rgb")
                palplot(pal)

    elif input == "hls":
        @interact
        def choose_light_palette_hls(h=(0., 1.),
                                     l=(0., 1.),  # noqa: E741
                                     s=(0., 1.),
                                     n=(3, 17)):
            color = h, l, s
            if as_cmap:
                colors = light_palette(color, 256, input="hls")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = light_palette(color, n, input="hls")
                palplot(pal)

    elif input == "husl":
        @interact
        def choose_light_palette_husl(h=(0, 359),
                                      s=(0, 99),
                                      l=(0, 99),  # noqa: E741
                                      n=(3, 17)):
            color = h, s, l
            if as_cmap:
                colors = light_palette(color, 256, input="husl")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = light_palette(color, n, input="husl")
                palplot(pal)

    if as_cmap:
        return cmap
    return pal
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
def interact(f):
msg = "Interactive palettes require `ipywidgets`, which is not installed."
raise ImportError(msg)
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def palplot(pal, size=1):
"""Plot the values in a color palette as a horizontal array.
Parameters
----------
pal : sequence of matplotlib colors
colors, i.e. as returned by seaborn.color_palette()
size :
scaling factor for size of plot
"""
n = len(pal)
f, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(pal)),
interpolation="nearest", aspect="auto")
ax.set_xticks(np.arange(n) - .5)
ax.set_yticks([-.5, .5])
# Ensure nice border between colors
ax.set_xticklabels(["" for _ in range(n)])
# The proper way to set no ticks
ax.yaxis.set_major_locator(ticker.NullLocator())
def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6,  # noqa
                      center="light", as_cmap=False):
    """Make a diverging palette between two HUSL colors.

    In the IPython notebook this palette can also be chosen interactively
    with the :func:`choose_diverging_palette` function.

    Parameters
    ----------
    h_neg, h_pos : float in [0, 359]
        Anchor hues for negative and positive extents of the map.
    s : float in [0, 100], optional
        Anchor saturation for both extents of the map.
    l : float in [0, 100], optional
        Anchor lightness for both extents of the map.
    sep : int, optional
        Size of the intermediate region.
    n : int, optional
        Number of colors in the palette (if not returning a cmap)
    center : {"light", "dark"}, optional
        Whether the center of the palette is light or dark
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    dark_palette : Create a sequential palette with dark values.
    light_palette : Create a sequential palette with light values.

    Examples
    --------
    .. include: ../docstrings/diverging_palette.rst

    """
    # Dict lookup (rather than if/else) preserves the KeyError raised for
    # an invalid `center` value.
    palfunc = {"dark": dark_palette, "light": light_palette}[center]
    midpoint = {"light": [(.95, .95, .95)], "dark": [(.133, .133, .133)]}[center]

    # Each half gets 128 steps minus half the separator width.
    half = int(128 - (sep // 2))
    neg = palfunc((h_neg, s, l), half, reverse=True, input="husl")
    pos = palfunc((h_pos, s, l), half, input="husl")

    # Splice the two ramps around `sep` copies of the midpoint color.
    ramp = np.concatenate([neg, midpoint * sep, pos])
    return blend_palette(ramp, n, as_cmap=as_cmap)
The provided code snippet includes necessary dependencies for implementing the `choose_diverging_palette` function. Write a Python function `def choose_diverging_palette(as_cmap=False)` to solve the following problem:
Launch an interactive widget to choose a diverging color palette. This corresponds with the :func:`diverging_palette` function. This kind of palette is good for data that range between interesting low values and interesting high values with a meaningful midpoint. (For example, change scores relative to some baseline value). Requires IPython 2+ and must be used in the notebook. Parameters ---------- as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- diverging_palette : Create a diverging color palette or colormap. choose_colorbrewer_palette : Interactively choose palettes from the colorbrewer set, including diverging palettes.
Here is the function:
def choose_diverging_palette(as_cmap=False):
    """Launch an interactive widget to choose a diverging color palette.

    This corresponds with the :func:`diverging_palette` function. This kind
    of palette is good for data that range between interesting low values
    and interesting high values with a meaningful midpoint. (For example,
    change scores relative to some baseline value).

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    diverging_palette : Create a diverging color palette or colormap.
    choose_colorbrewer_palette : Interactively choose palettes from the
                                 colorbrewer set, including diverging palettes.

    """
    # `pal` is mutated in place by the widget callback so the returned
    # object keeps tracking the current widget state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    @interact
    def choose_diverging_palette(
        h_neg=IntSlider(min=0,
                        max=359,
                        value=220),
        h_pos=IntSlider(min=0,
                        max=359,
                        value=10),
        s=IntSlider(min=0, max=99, value=74),
        l=IntSlider(min=0, max=99, value=50),  # noqa: E741
        sep=IntSlider(min=1, max=50, value=10),
        n=(2, 16),
        center=["light", "dark"]
    ):
        if as_cmap:
            colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
            _update_lut(cmap, colors)
            _show_cmap(cmap)
        else:
            pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
            palplot(pal)

    if as_cmap:
        return cmap
    return pal
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
def interact(f):
msg = "Interactive palettes require `ipywidgets`, which is not installed."
raise ImportError(msg)
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
def _init_mutable_colormap():
"""Create a matplotlib colormap that will be updated by the widgets."""
greys = color_palette("Greys", 256)
cmap = LinearSegmentedColormap.from_list("interactive", greys)
cmap._init()
cmap._set_extremes()
return cmap
def _update_lut(cmap, colors):
"""Change the LUT values in a matplotlib colormap in-place."""
cmap._lut[:256] = colors
cmap._set_extremes()
def _show_cmap(cmap):
"""Show a continuous matplotlib colormap."""
from .rcmod import axes_style # Avoid circular import
with axes_style("white"):
f, ax = plt.subplots(figsize=(8.25, .75))
ax.set(xticks=[], yticks=[])
x = np.linspace(0, 1, 256)[np.newaxis, :]
ax.pcolormesh(x, cmap=cmap)
def palplot(pal, size=1):
    """Plot the values in a color palette as a horizontal array.

    Parameters
    ----------
    pal : sequence of matplotlib colors
        colors, i.e. as returned by seaborn.color_palette()
    size :
        scaling factor for size of plot
    """
    # `mpl` and `ticker` are not among this module's imports (only `np`,
    # `plt`, and `LinearSegmentedColormap` are); import them locally so the
    # function does not fail with NameError at call time.
    import matplotlib as mpl
    from matplotlib import ticker

    n = len(pal)
    f, ax = plt.subplots(1, 1, figsize=(n * size, size))
    ax.imshow(np.arange(n).reshape(1, n),
              cmap=mpl.colors.ListedColormap(list(pal)),
              interpolation="nearest", aspect="auto")
    ax.set_xticks(np.arange(n) - .5)
    ax.set_yticks([-.5, .5])
    # Ensure nice border between colors
    ax.set_xticklabels(["" for _ in range(n)])
    # The proper way to set no ticks
    ax.yaxis.set_major_locator(ticker.NullLocator())
def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,
                      light=.85, dark=.15, reverse=False, as_cmap=False):
    """Make a sequential palette from the cubehelix system.

    This produces a colormap with linearly-decreasing (or increasing)
    brightness. That means that information will be preserved if printed to
    black and white or viewed by someone who is colorblind. "cubehelix" is
    also available as a matplotlib-based palette, but this function gives the
    user more control over the look of the palette and has a different set of
    defaults.

    In addition to using this function, it is also possible to generate a
    cubehelix palette generally in seaborn using a string starting with
    `ch:` and containing other parameters (e.g. `"ch:s=.25,r=-.5"`).

    Parameters
    ----------
    n_colors : int
        Number of colors in the palette.
    start : float, 0 <= start <= 3
        The hue value at the start of the helix.
    rot : float
        Rotations around the hue wheel over the range of the palette.
    gamma : float 0 <= gamma
        Nonlinearity to emphasize dark (gamma < 1) or light (gamma > 1) colors.
    hue : float, 0 <= hue <= 1
        Saturation of the colors.
    dark : float 0 <= dark <= 1
        Intensity of the darkest color in the palette.
    light : float 0 <= light <= 1
        Intensity of the lightest color in the palette.
    reverse : bool
        If True, the palette will go from dark to light.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    choose_cubehelix_palette : Launch an interactive widget to select cubehelix
                               palette parameters.
    dark_palette : Create a sequential palette with dark low values.
    light_palette : Create a sequential palette with bright low values.

    References
    ----------
    Green, D. A. (2011). "A colour scheme for the display of astronomical
    intensity images". Bulletin of the Astronomical Society of India, Vol. 39,
    p. 289-295.

    Examples
    --------
    .. include:: ../docstrings/cubehelix_palette.rst

    """
    def get_color_function(p0, p1):
        # Copied from matplotlib because it lives in private module
        def color(x):
            # Apply gamma factor to emphasise low or high intensity values
            xg = x ** gamma

            # Calculate amplitude and angle of deviation from the black
            # to white diagonal in the plane of constant
            # perceived intensity.
            a = hue * xg * (1 - xg) / 2

            phi = 2 * np.pi * (start / 3 + rot * x)

            return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))
        return color

    # Per-channel coefficients from Green (2011): each RGB channel is a
    # function of position along the helix.
    cdict = {
        "red": get_color_function(-0.14861, 1.78277),
        "green": get_color_function(-0.29227, -0.90649),
        "blue": get_color_function(1.97294, 0.0),
    }

    cmap = mpl.colors.LinearSegmentedColormap("cubehelix", cdict)

    # Sample the continuous map from light to dark intensity.
    x = np.linspace(light, dark, int(n_colors))
    pal = cmap(x)[:, :3].tolist()
    if reverse:
        pal = pal[::-1]

    if as_cmap:
        # Re-sample at 256 points so the returned colormap is smooth.
        x_256 = np.linspace(light, dark, 256)
        if reverse:
            x_256 = x_256[::-1]
        pal_256 = cmap(x_256)
        cmap = mpl.colors.ListedColormap(pal_256, "seaborn_cubehelix")
        return cmap
    else:
        return _ColorPalette(pal)
The provided code snippet includes necessary dependencies for implementing the `choose_cubehelix_palette` function. Write a Python function `def choose_cubehelix_palette(as_cmap=False)` to solve the following problem:
Launch an interactive widget to create a sequential cubehelix palette. This corresponds with the :func:`cubehelix_palette` function. This kind of palette is good for data that range between relatively uninteresting low values and interesting high values. The cubehelix system allows the palette to have more hue variance across the range, which can be helpful for distinguishing a wider range of values. Requires IPython 2+ and must be used in the notebook. Parameters ---------- as_cmap : bool If True, the return value is a matplotlib colormap rather than a list of discrete colors. Returns ------- pal or cmap : list of colors or matplotlib colormap Object that can be passed to plotting functions. See Also -------- cubehelix_palette : Create a sequential palette or colormap using the cubehelix system.
Here is the function:
def choose_cubehelix_palette(as_cmap=False):
    """Launch an interactive widget to create a sequential cubehelix palette.

    This corresponds with the :func:`cubehelix_palette` function. This kind
    of palette is good for data that range between relatively uninteresting
    low values and interesting high values. The cubehelix system allows the
    palette to have more hue variance across the range, which can be helpful
    for distinguishing a wider range of values.

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.

    """
    # The widget callback below mutates these closed-over objects in place;
    # they are what this function ultimately returns.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    # ipywidgets introspects the inner function's signature to build the UI,
    # so the parameter names and slider defaults ARE the interface here.
    @interact
    def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),
                         start=FloatSlider(min=0, max=3, value=0),
                         rot=FloatSlider(min=-1, max=1, value=.4),
                         gamma=FloatSlider(min=0, max=5, value=1),
                         hue=FloatSlider(min=0, max=1, value=.8),
                         light=FloatSlider(min=0, max=1, value=.85),
                         dark=FloatSlider(min=0, max=1, value=.15),
                         reverse=False):

        if as_cmap:
            # Sample 256 colors and overwrite the mutable colormap's LUT,
            # appending an alpha channel of ones.
            colors = cubehelix_palette(256, start, rot, gamma,
                                       hue, light, dark, reverse)
            _update_lut(cmap, np.c_[colors, np.ones(256)])
            _show_cmap(cmap)
        else:
            pal[:] = cubehelix_palette(n_colors, start, rot, gamma,
                                       hue, light, dark, reverse)
            palplot(pal)

    if as_cmap:
        return cmap
    return pal
171,822 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
The provided code snippet includes necessary dependencies for implementing the `MarkerStyle` function. Write a Python function `def MarkerStyle(marker=None, fillstyle=None)` to solve the following problem:
Allow MarkerStyle to accept a MarkerStyle object as parameter. Supports matplotlib < 3.3.0 https://github.com/matplotlib/matplotlib/pull/16692
Here is the function:
def MarkerStyle(marker=None, fillstyle=None):
    """
    Allow MarkerStyle to accept a MarkerStyle object as parameter.

    Supports matplotlib < 3.3.0
    https://github.com/matplotlib/matplotlib/pull/16692
    """
    if isinstance(marker, mpl.markers.MarkerStyle):
        if fillstyle is None:
            # Already a MarkerStyle and nothing to override: pass it through.
            return marker
        # Unwrap the raw marker symbol so it can be re-wrapped below with
        # the requested fillstyle.
        marker = marker.get_marker()
    return mpl.markers.MarkerStyle(marker, fillstyle)
171,823 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
The provided code snippet includes necessary dependencies for implementing the `norm_from_scale` function. Write a Python function `def norm_from_scale(scale, norm)` to solve the following problem:
Produce a Normalize object given a Scale and min/max domain limits.
Here is the function:
def norm_from_scale(scale, norm):
    """Produce a Normalize object given a Scale and min/max domain limits."""
    # This is an internal matplotlib function that simplifies things to access
    # It is likely to become part of the matplotlib API at some point:
    # https://github.com/matplotlib/matplotlib/issues/20329
    if isinstance(norm, mpl.colors.Normalize):
        return norm

    if scale is None:
        return None

    if norm is None:
        vmin = vmax = None
    else:
        vmin, vmax = norm  # TODO more helpful error if this fails?

    class ScaledNorm(mpl.colors.Normalize):
        # Subclass that normalizes in the scale's transformed space rather
        # than in data space, so e.g. a log scale yields log-spaced colors.

        def __call__(self, value, clip=None):
            # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py
            # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE
            value, is_scalar = self.process_value(value)
            self.autoscale_None(value)
            if self.vmin > self.vmax:
                raise ValueError("vmin must be less or equal to vmax")
            if self.vmin == self.vmax:
                return np.full_like(value, 0)
            if clip is None:
                clip = self.clip
            if clip:
                value = np.clip(value, self.vmin, self.vmax)
            # ***** Seaborn changes start ****
            # Normalize in transformed space: both the values and the limits
            # go through `self.transform` (assigned below) first.
            t_value = self.transform(value).reshape(np.shape(value))
            t_vmin, t_vmax = self.transform([self.vmin, self.vmax])
            # ***** Seaborn changes end *****
            if not np.isfinite([t_vmin, t_vmax]).all():
                raise ValueError("Invalid vmin or vmax")
            t_value -= t_vmin
            t_value /= (t_vmax - t_vmin)
            # Mask NaN/inf produced by the transform (e.g. log of nonpositive).
            t_value = np.ma.masked_invalid(t_value, copy=False)
            return t_value[0] if is_scalar else t_value

    new_norm = ScaledNorm(vmin, vmax)
    new_norm.transform = scale.get_transform().transform

    return new_norm
171,824 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
class Version(_BaseVersion):
    """Parse and compare PEP 440 version strings (vendored from `packaging`).

    NOTE(review): upstream, every accessor from ``epoch`` through ``micro`` is
    decorated with ``@property``; no decorators are visible here, and the
    bodies self-reference the accessors as attributes (``self.epoch != 0`` in
    ``__str__``), which only works if they are properties. The decorators
    appear stripped — confirm against upstream ``packaging.version``.
    """

    # Anchored PEP 440 grammar; VERSION_PATTERN is defined elsewhere in the file.
    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        # Reassemble the canonical string form, segment by segment.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    def release(self) -> Tuple[int, ...]:
        _release: Tuple[int, ...] = self._version.release
        return _release

    def pre(self) -> Optional[Tuple[str, int]]:
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    def post(self) -> Optional[int]:
        return self._version.post[1] if self._version.post else None

    def dev(self) -> Optional[int]:
        return self._version.dev[1] if self._version.dev else None

    def local(self) -> Optional[str]:
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    def public(self) -> str:
        # Everything before the "+local" segment.
        return str(self).split("+", 1)[0]

    def base_version(self) -> str:
        # Epoch + release only; drops pre/post/dev/local segments.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    def is_postrelease(self) -> bool:
        return self.post is not None

    def is_devrelease(self) -> bool:
        return self.dev is not None

    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0
The provided code snippet includes necessary dependencies for implementing the `scale_factory` function. Write a Python function `def scale_factory(scale, axis, **kwargs)` to solve the following problem:
Backwards compatibility for creation of independent scales. Matplotlib scales require an Axis object for instantiation on < 3.4. But the axis is not used, aside from extraction of the axis_name in LogScale.
Here is the function:
def scale_factory(scale, axis, **kwargs):
    """
    Backwards compatibility for creation of independent scales.

    Matplotlib scales require an Axis object for instantiation on < 3.4.
    But the axis is not used, aside from extraction of the axis_name in LogScale.
    """
    patch_transform = False

    if Version(mpl.__version__) < Version("3.4"):
        if axis[0] in "xy":
            patch_transform = True
            axis = axis[0]

            # Old matplotlib spells these keywords per-axis, e.g. "basex".
            base = kwargs.pop("base", None)
            if base is not None:
                kwargs[f"base{axis}"] = base
            nonpos = kwargs.pop("nonpositive", None)
            if nonpos is not None:
                kwargs[f"nonpos{axis}"] = nonpos

    if isinstance(scale, str):
        # Minimal stand-in: when given a scale *name*, matplotlib only reads
        # the axis object's `axis_name` attribute.
        class Axis:
            axis_name = axis
        axis = Axis()

    scale = mpl.scale.scale_factory(scale, axis, **kwargs)

    if patch_transform:
        # Old log transforms do not pick these values up from the keyword
        # arguments above, so set them on the transform directly.
        transform = scale.get_transform()
        transform.base = kwargs.get("base", 10)
        if kwargs.get("nonpositive") == "mask":
            # Setting a private attribute, but we only get here on an old
            # matplotlib, so this won't break going forwards.
            transform._clip = False

    return scale
171,825 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
class Version(_BaseVersion):
    """Parse and compare PEP 440 version strings (vendored from `packaging`).

    NOTE(review): upstream, every accessor from ``epoch`` through ``micro`` is
    decorated with ``@property``; no decorators are visible here, and the
    bodies self-reference the accessors as attributes (``self.epoch != 0`` in
    ``__str__``), which only works if they are properties. The decorators
    appear stripped — confirm against upstream ``packaging.version``.
    """

    # Anchored PEP 440 grammar; VERSION_PATTERN is defined elsewhere in the file.
    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        # Reassemble the canonical string form, segment by segment.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    def release(self) -> Tuple[int, ...]:
        _release: Tuple[int, ...] = self._version.release
        return _release

    def pre(self) -> Optional[Tuple[str, int]]:
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    def post(self) -> Optional[int]:
        return self._version.post[1] if self._version.post else None

    def dev(self) -> Optional[int]:
        return self._version.dev[1] if self._version.dev else None

    def local(self) -> Optional[str]:
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    def public(self) -> str:
        # Everything before the "+local" segment.
        return str(self).split("+", 1)[0]

    def base_version(self) -> str:
        # Epoch + release only; drops pre/post/dev/local segments.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    def is_postrelease(self) -> bool:
        return self.post is not None

    def is_devrelease(self) -> bool:
        return self.dev is not None

    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0
The provided code snippet includes necessary dependencies for implementing the `set_scale_obj` function. Write a Python function `def set_scale_obj(ax, axis, scale)` to solve the following problem:
Handle backwards compatibility with setting matplotlib scale.
Here is the function:
def set_scale_obj(ax, axis, scale):
    """Handle backwards compatibility with setting matplotlib scale."""
    if Version(mpl.__version__) >= Version("3.4"):
        # Passing a ScaleBase instance to Axes.set_{x,y}scale was added in
        # matplotlib 3.4.0: GH: matplotlib/matplotlib/pull/19089
        ax.set(**{f"{axis}scale": scale})
        return

    # Older matplotlib: fall back to the scale *name*, which is restrictive
    # only for user-defined custom scales (they must update the registry too).
    if scale.name is None:
        # Hack to support our custom Formatter-less CatScale
        return

    setter = getattr(ax, f"set_{axis}scale")
    extra_kws = {}
    if scale.name == "function":
        transform = scale.get_transform()
        extra_kws["functions"] = (transform._forward, transform._inverse)
    setter(scale.name, **extra_kws)
    scale.set_default_locators_and_formatters(getattr(ax, f"{axis}axis"))
171,826 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
The provided code snippet includes necessary dependencies for implementing the `register_colormap` function. Write a Python function `def register_colormap(name, cmap)` to solve the following problem:
Handle changes to matplotlib colormap interface in 3.6.
Here is the function:
def register_colormap(name, cmap):
    """Handle changes to matplotlib colormap interface in 3.6."""
    try:
        # mpl >= 3.6 exposes a registry object; skip names already present.
        if name not in mpl.colormaps:
            mpl.colormaps.register(cmap, name=name)
    except AttributeError:
        # Older matplotlib without the registry attribute.
        mpl.cm.register_cmap(name, cmap)
171,827 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
The provided code snippet includes necessary dependencies for implementing the `set_layout_engine` function. Write a Python function `def set_layout_engine(fig, engine)` to solve the following problem:
Handle changes to auto layout engine interface in 3.6
Here is the function:
def set_layout_engine(fig, engine):
    """Handle changes to auto layout engine interface in 3.6"""
    if hasattr(fig, "set_layout_engine"):
        # mpl >= 3.6: one unified entry point.
        fig.set_layout_engine(engine)
    elif engine == "tight":
        fig.set_tight_layout(True)
    elif engine == "constrained":
        fig.set_constrained_layout(True)
171,828 | import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
class Version(_BaseVersion):
    """Parse and compare PEP 440 version strings (vendored from `packaging`).

    NOTE(review): upstream, every accessor from ``epoch`` through ``micro`` is
    decorated with ``@property``; no decorators are visible here, and the
    bodies self-reference the accessors as attributes (``self.epoch != 0`` in
    ``__str__``), which only works if they are properties. The decorators
    appear stripped — confirm against upstream ``packaging.version``.
    """

    # Anchored PEP 440 grammar; VERSION_PATTERN is defined elsewhere in the file.
    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    def __init__(self, version: str) -> None:

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        # Reassemble the canonical string form, segment by segment.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    def epoch(self) -> int:
        _epoch: int = self._version.epoch
        return _epoch

    def release(self) -> Tuple[int, ...]:
        _release: Tuple[int, ...] = self._version.release
        return _release

    def pre(self) -> Optional[Tuple[str, int]]:
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    def post(self) -> Optional[int]:
        return self._version.post[1] if self._version.post else None

    def dev(self) -> Optional[int]:
        return self._version.dev[1] if self._version.dev else None

    def local(self) -> Optional[str]:
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    def public(self) -> str:
        # Everything before the "+local" segment.
        return str(self).split("+", 1)[0]

    def base_version(self) -> str:
        # Epoch + release only; drops pre/post/dev/local segments.
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    def is_postrelease(self) -> bool:
        return self.post is not None

    def is_devrelease(self) -> bool:
        return self.dev is not None

    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0
The provided code snippet includes necessary dependencies for implementing the `share_axis` function. Write a Python function `def share_axis(ax0, ax1, which)` to solve the following problem:
Handle changes to post-hoc axis sharing.
Here is the function:
def share_axis(ax0, ax1, which):
    """Handle changes to post-hoc axis sharing."""
    if Version(mpl.__version__) >= Version("3.5.0"):
        # Modern API: Axes.sharex / Axes.sharey.
        getattr(ax1, f"share{which}")(ax0)
    else:
        # Older matplotlib: join the axes through the shared-axes Grouper.
        grouper = getattr(ax0, f"get_shared_{which}_axes")()
        grouper.join(ax1, ax0)
171,829 | from __future__ import annotations
from dataclasses import dataclass, fields, field
import textwrap
from typing import Any, Callable, Union
from collections.abc import Generator
import numpy as np
import pandas as pd
import matplotlib as mpl
from numpy import ndarray
from pandas import DataFrame
from matplotlib.artist import Artist
from seaborn._core.scales import Scale
from seaborn._core.properties import (
PROPERTIES,
Property,
RGBATuple,
DashPattern,
DashPatternWithOffset,
)
from seaborn._core.exceptions import PlotSpecError
class Mark:
    """Base class for objects that visually represent data."""

    # Extra keyword arguments forwarded to the underlying matplotlib artists.
    # NOTE(review): `field(...)` without a visible @dataclass decorator on the
    # class suggests the decorator was stripped in extraction — confirm.
    artist_kws: dict = field(default_factory=dict)

    # NOTE(review): `_resolve` subscripts `self._mappable_props[name]`, so this
    # must be an @property upstream; the decorator appears stripped here.
    def _mappable_props(self):
        # Map each Mappable-defaulted dataclass field name to its current value.
        return {
            f.name: getattr(self, f.name) for f in fields(self)
            if isinstance(f.default, Mappable)
        }

    def _grouping_props(self):
        # Names of mappable properties that should split data into groups.
        # TODO does it make sense to have variation within a Mark's
        # properties about whether they are grouping?
        return [
            f.name for f in fields(self)
            if isinstance(f.default, Mappable) and f.default.grouping
        ]

    # TODO make this method private? Would extender ever need to call directly?
    def _resolve(
        self,
        data: DataFrame | dict[str, Any],
        name: str,
        scales: dict[str, Scale] | None = None,
    ) -> Any:
        """Obtain default, specified, or mapped value for a named feature.

        Parameters
        ----------
        data : DataFrame or dict with scalar values
            Container with data values for features that will be semantically mapped.
        name : string
            Identity of the feature / semantic.
        scales: dict
            Mapping from variable to corresponding scale object.

        Returns
        -------
        value or array of values
            Outer return type depends on whether `data` is a dict (implying that
            we want a single value) or DataFrame (implying that we want an array
            of values with matching length).

        """
        feature = self._mappable_props[name]
        prop = PROPERTIES.get(name, Property(name))
        directly_specified = not isinstance(feature, Mappable)
        return_multiple = isinstance(data, pd.DataFrame)
        # Style-like features stay as Python objects; others become arrays.
        return_array = return_multiple and not name.endswith("style")

        # Special case width because it needs to be resolved and added to the dataframe
        # during layer prep (so the Move operations use it properly).
        # TODO how does width *scaling* work, e.g. for violin width by count?
        if name == "width":
            directly_specified = directly_specified and name not in data

        if directly_specified:
            # A concrete value was set on the mark; broadcast it as needed.
            feature = prop.standardize(feature)
            if return_multiple:
                feature = [feature] * len(data)
            if return_array:
                feature = np.array(feature)
            return feature

        if name in data:
            # The feature is driven by the data, possibly through a scale.
            if scales is None or name not in scales:
                # TODO Might this obviate the identity scale? Just don't add a scale?
                feature = data[name]
            else:
                scale = scales[name]
                value = data[name]
                try:
                    feature = scale(value)
                except Exception as err:
                    raise PlotSpecError._during("Scaling operation", name) from err

            if return_array:
                feature = np.asarray(feature)
            return feature

        if feature.depend is not None:
            # Derive this feature's value from another feature's resolution.
            # TODO add source_func or similar to transform the source value?
            # e.g. set linewidth as a proportion of pointsize?
            return self._resolve(data, feature.depend, scales)

        # Fall back to the Mappable's declared default.
        default = prop.standardize(feature.default)
        if return_multiple:
            default = [default] * len(data)
        if return_array:
            default = np.array(default)
        return default

    def _infer_orient(self, scales: dict) -> str:  # TODO type scales

        # TODO The original version of this (in seaborn._oldcore) did more checking.
        # Paring that down here for the prototype to see what restrictions make sense.

        # TODO rethink this to map from scale type to "DV priority" and use that?
        # e.g. Nominal > Discrete > Continuous
        x = 0 if "x" not in scales else scales["x"]._priority
        y = 0 if "y" not in scales else scales["y"]._priority

        # The higher-priority scale identifies the orient (categorical) axis.
        if y > x:
            return "y"
        else:
            return "x"

    def _plot(
        self,
        split_generator: Callable[[], Generator],
        scales: dict[str, Scale],
        orient: str,
    ) -> None:
        """Main interface for creating a plot."""
        raise NotImplementedError()

    def _legend_artist(
        self, variables: list[str], value: Any, scales: dict[str, Scale],
    ) -> Artist:
        # Base implementation: this mark contributes no legend artist.
        return None
# NOTE(review): rebinds `Any` (imported from typing in the header) to a fresh
# sentinel object — presumably a "no value" marker for runtime comparisons.
# Confirm nothing below still expects `Any` to be the typing construct.
Any = object()
class Scale:
    """Base class for objects that map data values to visual properties."""
    # NOTE(review): the annotated class attributes below (plus __post_init__)
    # indicate this is a dataclass upstream; the @dataclass decorator is not
    # visible here and appears stripped — confirm.

    values: tuple | str | list | dict | None

    _priority: ClassVar[int]
    _pipeline: Pipeline
    _matplotlib_scale: ScaleBase
    _spacer: staticmethod
    _legend: tuple[list[Any], list[str]] | None

    def __post_init__(self):
        # Tick/label configuration and legend info start unset; subclasses'
        # tick()/label() methods are expected to populate the params.
        self._tick_params = None
        self._label_params = None
        self._legend = None

    def tick(self):
        raise NotImplementedError()

    def label(self):
        raise NotImplementedError()

    def _get_locators(self):
        raise NotImplementedError()

    def _get_formatter(self, locator: Locator | None = None):
        raise NotImplementedError()

    def _get_scale(self, name: str, forward: Callable, inverse: Callable):
        # Wrap the forward/inverse pair in a FuncScale subclass that installs
        # this Scale's locators and formatter as the axis defaults.
        major_locator, minor_locator = self._get_locators(**self._tick_params)
        major_formatter = self._get_formatter(major_locator, **self._label_params)

        class InternalScale(mpl.scale.FuncScale):
            def set_default_locators_and_formatters(self, axis):
                axis.set_major_locator(major_locator)
                if minor_locator is not None:
                    axis.set_minor_locator(minor_locator)
                axis.set_major_formatter(major_formatter)

        return InternalScale(name, (forward, inverse))

    def _spacing(self, x: Series) -> float:
        space = self._spacer(x)
        if np.isnan(space):
            # This happens when there is no variance in the orient coordinate data
            # Not exactly clear what the right default is, but 1 seems reasonable?
            return 1
        return space

    def _setup(
        self, data: Series, prop: Property, axis: Axis | None = None,
    ) -> Scale:
        raise NotImplementedError()

    def _finalize(self, p: Plot, axis: Axis) -> None:
        """Perform scale-specific axis tweaks after adding artists."""
        pass

    def __call__(self, data: Series) -> ArrayLike:
        # Apply the fitted transform pipeline to data; scalars round-trip
        # through a length-1 array so every pipeline step sees an array.

        trans_data: Series | NDArray | list

        # TODO sometimes we need to handle scalars (e.g. for Line)
        # but what is the best way to do that?
        scalar_data = np.isscalar(data)
        if scalar_data:
            trans_data = np.array([data])
        else:
            trans_data = data

        for func in self._pipeline:
            if func is not None:
                trans_data = func(trans_data)

        if scalar_data:
            return trans_data[0]
        else:
            return trans_data
def _identity():
    """Return a minimal pass-through Scale with an empty transform pipeline."""
    # Defined inside the function so each call yields an independent class.
    class Identity(Scale):
        _pipeline = []
        _spacer = None
        _legend = None
        _matplotlib_scale = None

    return Identity()
def resolve_properties(
    mark: Mark, data: DataFrame, scales: dict[str, Scale]
) -> dict[str, Any]:
    """Resolve every mappable property of `mark` against `data` and `scales`."""
    resolved = {}
    for prop_name in mark._mappable_props:
        resolved[prop_name] = mark._resolve(data, prop_name, scales)
    return resolved
171,830 | from __future__ import annotations
from dataclasses import dataclass, fields, field
import textwrap
from typing import Any, Callable, Union
from collections.abc import Generator
import numpy as np
import pandas as pd
import matplotlib as mpl
from numpy import ndarray
from pandas import DataFrame
from matplotlib.artist import Artist
from seaborn._core.scales import Scale
from seaborn._core.properties import (
PROPERTIES,
Property,
RGBATuple,
DashPattern,
DashPatternWithOffset,
)
from seaborn._core.exceptions import PlotSpecError
class Mark:
    """Base class for objects that visually represent data."""

    # Extra keyword arguments forwarded to the underlying matplotlib artist(s).
    # NOTE(review): `field(...)` only takes effect under @dataclass; no decorator
    # is visible here — presumably subclasses are dataclasses. Confirm upstream.
    artist_kws: dict = field(default_factory=dict)

    def _mappable_props(self):
        # Map each dataclass field declared with a Mappable default to its
        # current value on this instance.
        # NOTE(review): `_resolve` below subscripts this attribute
        # (`self._mappable_props[name]`), which suggests this was a @property
        # in the original source — confirm before relying on call semantics.
        return {
            f.name: getattr(self, f.name) for f in fields(self)
            if isinstance(f.default, Mappable)
        }

    def _grouping_props(self):
        # Names of mappable fields whose Mappable spec has grouping enabled.
        # TODO does it make sense to have variation within a Mark's
        # properties about whether they are grouping?
        return [
            f.name for f in fields(self)
            if isinstance(f.default, Mappable) and f.default.grouping
        ]

    # TODO make this method private? Would extender every need to call directly?
    def _resolve(
        self,
        data: DataFrame | dict[str, Any],
        name: str,
        scales: dict[str, Scale] | None = None,
    ) -> Any:
        """Obtain default, specified, or mapped value for a named feature.

        Parameters
        ----------
        data : DataFrame or dict with scalar values
            Container with data values for features that will be semantically mapped.
        name : string
            Identity of the feature / semantic.
        scales: dict
            Mapping from variable to corresponding scale object.

        Returns
        -------
        value or array of values
            Outer return type depends on whether `data` is a dict (implying that
            we want a single value) or DataFrame (implying that we want an array
            of values with matching length).
        """
        feature = self._mappable_props[name]
        prop = PROPERTIES.get(name, Property(name))
        # A non-Mappable value means the user set the property directly.
        directly_specified = not isinstance(feature, Mappable)
        return_multiple = isinstance(data, pd.DataFrame)
        # "style" properties (e.g. linestyle specs) stay as lists; others
        # are converted to arrays for vectorized consumption.
        return_array = return_multiple and not name.endswith("style")

        # Special case width because it needs to be resolved and added to the dataframe
        # during layer prep (so the Move operations use it properly).
        # TODO how does width *scaling* work, e.g. for violin width by count?
        if name == "width":
            directly_specified = directly_specified and name not in data

        if directly_specified:
            feature = prop.standardize(feature)
            if return_multiple:
                # Broadcast the scalar to one value per row.
                feature = [feature] * len(data)
            if return_array:
                feature = np.array(feature)
            return feature

        if name in data:
            if scales is None or name not in scales:
                # TODO Might this obviate the identity scale? Just don't add a scale?
                feature = data[name]
            else:
                scale = scales[name]
                value = data[name]
                try:
                    feature = scale(value)
                except Exception as err:
                    # Re-raise with plot-spec context so the user sees which
                    # variable failed to scale.
                    raise PlotSpecError._during("Scaling operation", name) from err
            if return_array:
                feature = np.asarray(feature)
            return feature

        if feature.depend is not None:
            # TODO add source_func or similar to transform the source value?
            # e.g. set linewidth as a proportion of pointsize?
            return self._resolve(data, feature.depend, scales)

        # Fall back to the Mappable's declared default.
        default = prop.standardize(feature.default)
        if return_multiple:
            default = [default] * len(data)
        if return_array:
            default = np.array(default)
        return default

    def _infer_orient(self, scales: dict) -> str:  # TODO type scales
        # Pick "x" or "y" as the orientation based on each scale's _priority;
        # a missing coordinate scale counts as priority 0.
        # TODO The original version of this (in seaborn._oldcore) did more checking.
        # Paring that down here for the prototype to see what restrictions make sense.
        # TODO rethink this to map from scale type to "DV priority" and use that?
        # e.g. Nominal > Discrete > Continuous
        x = 0 if "x" not in scales else scales["x"]._priority
        y = 0 if "y" not in scales else scales["y"]._priority
        if y > x:
            return "y"
        else:
            return "x"

    def _plot(
        self,
        split_generator: Callable[[], Generator],
        scales: dict[str, Scale],
        orient: str,
    ) -> None:
        """Main interface for creating a plot."""
        raise NotImplementedError()

    def _legend_artist(
        self, variables: list[str], value: Any, scales: dict[str, Scale],
    ) -> Artist:
        # Base implementation produces no legend artist.
        # NOTE(review): annotated as returning Artist but returns None here;
        # presumably subclasses override with a real artist — confirm.
        return None
class Scale:
    """Base class for objects that map data values to visual properties."""

    # User-facing specification of output values (palette, range, dict, ...).
    values: tuple | str | list | dict | None

    # Filled in by subclasses / setup machinery:
    _priority: ClassVar[int]  # read by Mark._infer_orient to pick an orientation
    _pipeline: Pipeline  # callables applied in order by __call__
    _matplotlib_scale: ScaleBase
    _spacer: staticmethod
    _legend: tuple[list[Any], list[str]] | None

    def __post_init__(self):
        # None means "not yet configured"; tick()/label() populate these.
        self._tick_params = None
        self._label_params = None
        self._legend = None

    def tick(self):
        # Subclasses configure tick locating here.
        raise NotImplementedError()

    def label(self):
        # Subclasses configure tick formatting here.
        raise NotImplementedError()

    def _get_locators(self):
        raise NotImplementedError()

    def _get_formatter(self, locator: Locator | None = None):
        raise NotImplementedError()

    def _get_scale(self, name: str, forward: Callable, inverse: Callable):
        # Build a matplotlib scale whose default locators/formatters are the
        # ones this Scale was configured with.
        major_locator, minor_locator = self._get_locators(**self._tick_params)
        major_formatter = self._get_formatter(major_locator, **self._label_params)

        class InternalScale(mpl.scale.FuncScale):
            def set_default_locators_and_formatters(self, axis):
                axis.set_major_locator(major_locator)
                if minor_locator is not None:
                    axis.set_minor_locator(minor_locator)
                axis.set_major_formatter(major_formatter)

        return InternalScale(name, (forward, inverse))

    def _spacing(self, x: Series) -> float:
        # Smallest spacing between adjacent values on this scale, per _spacer.
        space = self._spacer(x)
        if np.isnan(space):
            # This happens when there is no variance in the orient coordinate data
            # Not exactly clear what the right default is, but 1 seems reasonable?
            return 1
        return space

    def _setup(
        self, data: Series, prop: Property, axis: Axis | None = None,
    ) -> Scale:
        raise NotImplementedError()

    def _finalize(self, p: Plot, axis: Axis) -> None:
        """Perform scale-specific axis tweaks after adding artists."""
        pass

    def __call__(self, data: Series) -> ArrayLike:
        """Apply the transform pipeline to *data*; scalars round-trip as scalars."""
        trans_data: Series | NDArray | list

        # TODO sometimes we need to handle scalars (e.g. for Line)
        # but what is the best way to do that?
        scalar_data = np.isscalar(data)
        if scalar_data:
            trans_data = np.array([data])
        else:
            trans_data = data

        for func in self._pipeline:
            if func is not None:
                trans_data = func(trans_data)

        if scalar_data:
            return trans_data[0]
        else:
            return trans_data
def _identity():
    """Build a no-op Scale instance (empty pipeline; no spacer/legend/mpl scale)."""
    class Identity(Scale):
        _pipeline = []
        _spacer = None
        _legend = None
        _matplotlib_scale = None

    # Instantiate and hand back the inert scale.
    return Identity()
RGBATuple = Tuple[float, float, float, float]
The provided code snippet includes necessary dependencies for implementing the `resolve_color` function. Write a Python function `def resolve_color( mark: Mark, data: DataFrame | dict, prefix: str = "", scales: dict[str, Scale] | None = None, ) -> RGBATuple | ndarray` to solve the following problem:
Obtain a default, specified, or mapped value for a color feature. This method exists separately to support the relationship between a color and its corresponding alpha. We want to respect alpha values that are passed in specified (or mapped) color values but also make use of a separate `alpha` variable, which can be mapped. This approach may also be extended to support mapping of specific color channels (i.e. luminance, chroma) in the future. Parameters ---------- mark : Mark with the color property. data : Container with data values for features that will be semantically mapped. prefix : Support "color", "fillcolor", etc.
Here is the function:
def resolve_color(
    mark: Mark,
    data: DataFrame | dict,
    prefix: str = "",
    scales: dict[str, Scale] | None = None,
) -> RGBATuple | ndarray:
    """
    Obtain a default, specified, or mapped value for a color feature.

    Kept separate from generic property resolution so that alpha embedded in
    specified (or mapped) color values is respected while a separate `alpha`
    variable can also be mapped. The approach may later extend to mapping of
    specific color channels (i.e. luminance, chroma).

    Parameters
    ----------
    mark :
        Mark with the color property.
    data :
        Container with data values for features that will be semantically mapped.
    prefix :
        Support "color", "fillcolor", etc.
    """
    rgb = mark._resolve(data, f"{prefix}color", scales)

    # Prefer a channel-specific alpha (e.g. "fillalpha") when the mark defines one.
    if f"{prefix}alpha" in mark._mappable_props:
        alpha = mark._resolve(data, f"{prefix}alpha", scales)
    else:
        alpha = mark._resolve(data, "alpha", scales)

    def visible(x, axis=None):
        """Detect "invisible" colors to set alpha appropriately."""
        # Non-float dtypes (e.g. string colors) are always treated as visible.
        return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)

    # Scalar-ish color: a single color spec (string, or a tuple of floats);
    # the all-float check catches vectors of strings with an identity scale.
    if np.ndim(rgb) < 2 and all(isinstance(x, float) for x in rgb):
        if len(rgb) == 4:
            # Already RGBA — keep the embedded alpha.
            return mpl.colors.to_rgba(rgb)
        return mpl.colors.to_rgba(rgb, alpha if visible(rgb) else np.nan)

    # Vector of colors.
    if np.ndim(rgb) == 2 and rgb.shape[1] == 4:
        return mpl.colors.to_rgba_array(rgb)
    masked_alpha = np.where(visible(rgb, axis=1), alpha, np.nan)
    return mpl.colors.to_rgba_array(rgb, masked_alpha)
# TODO should we be implementing fill here too?
# (i.e. set fillalpha to 0 when fill=False) | Obtain a default, specified, or mapped value for a color feature. This method exists separately to support the relationship between a color and its corresponding alpha. We want to respect alpha values that are passed in specified (or mapped) color values but also make use of a separate `alpha` variable, which can be mapped. This approach may also be extended to support mapping of specific color channels (i.e. luminance, chroma) in the future. Parameters ---------- mark : Mark with the color property. data : Container with data values for features that will be semantically mapped. prefix : Support "color", "fillcolor", etc. |
171,831 | from __future__ import annotations
from dataclasses import dataclass, fields, field
import textwrap
from typing import Any, Callable, Union
from collections.abc import Generator
import numpy as np
import pandas as pd
import matplotlib as mpl
from numpy import ndarray
from pandas import DataFrame
from matplotlib.artist import Artist
from seaborn._core.scales import Scale
from seaborn._core.properties import (
PROPERTIES,
Property,
RGBATuple,
DashPattern,
DashPatternWithOffset,
)
from seaborn._core.exceptions import PlotSpecError
class Mappable:
    """Specification for a mark property whose default can come from several places."""

    def __init__(
        self,
        val: Any = None,
        depend: str | None = None,
        rc: str | None = None,
        auto: bool = False,
        grouping: bool = True,
    ):
        """
        Property that can be mapped from data or set directly, with flexible defaults.

        Parameters
        ----------
        val : Any
            Use this value as the default.
        depend : str
            Use the value of this feature as the default.
        rc : str
            Use the value of this rcParam as the default.
        auto : bool
            The default value will depend on other parameters at compile time.
        grouping : bool
            If True, use the mapped variable to define groups.
        """
        # Validate the indirect-default sources before storing anything.
        if depend is not None:
            assert depend in PROPERTIES
        if rc is not None:
            assert rc in mpl.rcParams

        self._val = val
        self._rc = rc
        self._depend = depend
        self._auto = auto
        self._grouping = grouping

    def __repr__(self):
        """Nice formatting for when object appears in Mark init signature."""
        if self._val is not None:
            return f"<{self._val!r}>"
        if self._depend is not None:
            return f"<depend:{self._depend}>"
        if self._rc is not None:
            return f"<rc:{self._rc}>"
        return "<auto>" if self._auto else "<undefined>"

    def depend(self) -> Any:
        """Return the name of the feature to source a default value from."""
        return self._depend

    def grouping(self) -> bool:
        """Return whether the mapped variable should define groups."""
        return self._grouping

    def default(self) -> Any:
        """Get the default value for this feature, or access the relevant rcParam."""
        if self._val is not None:
            return self._val
        return mpl.rcParams.get(self._rc)
def fields(class_or_instance: Any) -> Tuple[Field[Any], ...]: ...
def document_properties(mark):
    """Class decorator: append the mark's mappable properties to its docstring.

    The property list is inserted after the docstring's summary (first two
    lines), wrapped to 78 columns with an 8-space indent.
    """
    prop_names = [f.name for f in fields(mark) if isinstance(f.default, Mappable)]
    wrapped = textwrap.fill(
        ", ".join([f"|{p}|" for p in prop_names]),
        width=78, initial_indent=" " * 8, subsequent_indent=" " * 8,
    )
    insert = [
        "",
        "    This mark defines the following properties:",
        wrapped,
    ]

    lines = mark.__doc__.split("\n")
    mark.__doc__ = "\n".join([*lines[:2], *insert, *lines[2:]])
    return mark
171,832 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
def to_utf8(obj):
    """Return a string representing a Python object.

    ``str`` instances are returned unchanged; bytes-like objects are decoded
    as UTF-8; anything else is rendered via ``str()``.

    Parameters
    ----------
    obj : object
        Any Python object

    Returns
    -------
    s : str
        UTF-8-decoded string representation of ``obj``
    """
    if isinstance(obj, str):
        return obj
    try:
        # EAFP: anything exposing .decode (bytes, bytearray, ...) is decoded.
        return obj.decode(encoding="utf-8")
    except AttributeError:
        # Not bytes-like; fall back to the object's string form.
        return str(obj)
The provided code snippet includes necessary dependencies for implementing the `_index_to_label` function. Write a Python function `def _index_to_label(index)` to solve the following problem:
Convert a pandas index or multiindex to an axis label.
Here is the function:
def _index_to_label(index):
    """Convert a pandas index or multiindex to an axis label."""
    if not isinstance(index, pd.MultiIndex):
        return index.name
    # Join the per-level names into a single hyphenated label.
    return "-".join(map(to_utf8, index.names))
171,833 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
def to_utf8(obj):
    """Return a string representing a Python object.

    Strings are returned as-is, byte strings are UTF-8-decoded, and any
    other object is converted with ``str()``.

    Parameters
    ----------
    obj : object
        Any Python object

    Returns
    -------
    s : str
        UTF-8-decoded string representation of ``obj``
    """
    if isinstance(obj, str):
        return obj
    try:
        return obj.decode(encoding="utf-8")
    except AttributeError:
        # obj has no .decode, so it is not bytes-like
        return str(obj)
The provided code snippet includes necessary dependencies for implementing the `_index_to_ticklabels` function. Write a Python function `def _index_to_ticklabels(index)` to solve the following problem:
Convert a pandas index or multiindex into ticklabels.
Here is the function:
def _index_to_ticklabels(index):
    """Convert a pandas index or multiindex into ticklabels."""
    if not isinstance(index, pd.MultiIndex):
        return index.values
    # One hyphen-joined label per multiindex entry.
    return ["-".join(map(to_utf8, tup)) for tup in index.values]
171,834 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
The provided code snippet includes necessary dependencies for implementing the `_convert_colors` function. Write a Python function `def _convert_colors(colors)` to solve the following problem:
Convert either a list of colors or nested lists of colors to RGB.
Here is the function:
def _convert_colors(colors):
    """Convert either a list of colors or nested lists of colors to RGB."""
    to_rgb = mpl.colors.to_rgb
    # Probe the first element: a valid color spec means a flat list,
    # a ValueError means we have nested lists of colors.
    try:
        to_rgb(colors[0])
    except ValueError:
        return [list(map(to_rgb, row)) for row in colors]
    return [to_rgb(c) for c in colors]
171,835 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
The provided code snippet includes necessary dependencies for implementing the `_matrix_mask` function. Write a Python function `def _matrix_mask(data, mask)` to solve the following problem:
Ensure that data and mask are compatible and add missing values. Values will be plotted for cells where ``mask`` is ``False``. ``data`` is expected to be a DataFrame; ``mask`` can be an array or a DataFrame.
Here is the function:
def _matrix_mask(data, mask):
    """Ensure that data and mask are compatible and add missing values.

    Values will be plotted for cells where ``mask`` is ``False``.

    ``data`` is expected to be a DataFrame; ``mask`` can be an array or
    a DataFrame.

    Raises
    ------
    ValueError
        If an array mask's shape, or a DataFrame mask's index/columns,
        do not match ``data``.
    """
    if mask is None:
        mask = np.zeros(data.shape, bool)

    if isinstance(mask, np.ndarray):
        # For array masks, ensure that shape matches data then convert
        if mask.shape != data.shape:
            raise ValueError("Mask must have the same shape as data.")

        mask = pd.DataFrame(mask,
                            index=data.index,
                            columns=data.columns,
                            dtype=bool)

    elif isinstance(mask, pd.DataFrame):
        # For DataFrame masks, ensure that semantic labels match data.
        # Bug fix: the original condition was
        #   not mask.index.equals(data.index) and mask.columns.equals(data.columns)
        # which, because `not` binds tighter than `and`, only raised when the
        # index differed while the columns matched. Raise on any mismatch.
        if not (mask.index.equals(data.index)
                and mask.columns.equals(data.columns)):
            err = "Mask must have the same index and columns as data."
            raise ValueError(err)

    # Add any cells with missing data to the mask
    # This works around an issue where `plt.pcolormesh` doesn't represent
    # missing data properly
    mask = mask | pd.isnull(data)

    return mask
171,836 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
class _HeatMapper:
    """Draw a heatmap plot of a matrix with nice labels and colormaps."""

    def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
                 annot_kws, cbar, cbar_kws,
                 xticklabels=True, yticklabels=True, mask=None):
        """Initialize the plotting object."""
        # We always want to have a DataFrame with semantic information
        # and an ndarray to pass to matplotlib
        if isinstance(data, pd.DataFrame):
            plot_data = data.values
        else:
            plot_data = np.asarray(data)
            data = pd.DataFrame(plot_data)

        # Validate the mask and convert to DataFrame
        mask = _matrix_mask(data, mask)

        # Masked cells are hidden from the colormapping entirely.
        plot_data = np.ma.masked_where(np.asarray(mask), plot_data)

        # Get good names for the rows and columns
        # An int means "label every n-th tick" using the index labels.
        xtickevery = 1
        if isinstance(xticklabels, int):
            xtickevery = xticklabels
            xticklabels = _index_to_ticklabels(data.columns)
        elif xticklabels is True:
            xticklabels = _index_to_ticklabels(data.columns)
        elif xticklabels is False:
            xticklabels = []

        ytickevery = 1
        if isinstance(yticklabels, int):
            ytickevery = yticklabels
            yticklabels = _index_to_ticklabels(data.index)
        elif yticklabels is True:
            yticklabels = _index_to_ticklabels(data.index)
        elif yticklabels is False:
            yticklabels = []

        # Resolve tick positions now; "auto" defers to plot time when the
        # Axes size is known (see _auto_ticks).
        if not len(xticklabels):
            self.xticks = []
            self.xticklabels = []
        elif isinstance(xticklabels, str) and xticklabels == "auto":
            self.xticks = "auto"
            self.xticklabels = _index_to_ticklabels(data.columns)
        else:
            self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
                                                             xtickevery)

        if not len(yticklabels):
            self.yticks = []
            self.yticklabels = []
        elif isinstance(yticklabels, str) and yticklabels == "auto":
            self.yticks = "auto"
            self.yticklabels = _index_to_ticklabels(data.index)
        else:
            self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
                                                             ytickevery)

        # Get good names for the axis labels
        xlabel = _index_to_label(data.columns)
        ylabel = _index_to_label(data.index)
        self.xlabel = xlabel if xlabel is not None else ""
        self.ylabel = ylabel if ylabel is not None else ""

        # Determine good default values for the colormapping
        self._determine_cmap_params(plot_data, vmin, vmax,
                                    cmap, center, robust)

        # Sort out the annotations
        if annot is None or annot is False:
            annot = False
            annot_data = None
        else:
            if isinstance(annot, bool):
                # annot=True annotates with the data values themselves.
                annot_data = plot_data
            else:
                annot_data = np.asarray(annot)
                if annot_data.shape != plot_data.shape:
                    err = "`data` and `annot` must have same shape."
                    raise ValueError(err)
            annot = True

        # Save other attributes to the object
        self.data = data
        self.plot_data = plot_data

        self.annot = annot
        self.annot_data = annot_data

        self.fmt = fmt
        # Copy user dicts so later mutation does not affect the caller.
        self.annot_kws = {} if annot_kws is None else annot_kws.copy()
        self.cbar = cbar
        self.cbar_kws = {} if cbar_kws is None else cbar_kws.copy()

    def _determine_cmap_params(self, plot_data, vmin, vmax,
                               cmap, center, robust):
        """Use some heuristics to set good defaults for colorbar and range."""
        # plot_data is a np.ma.array instance
        calc_data = plot_data.astype(float).filled(np.nan)
        if vmin is None:
            if robust:
                # Robust range: clip to the 2nd/98th percentiles.
                vmin = np.nanpercentile(calc_data, 2)
            else:
                vmin = np.nanmin(calc_data)
        if vmax is None:
            if robust:
                vmax = np.nanpercentile(calc_data, 98)
            else:
                vmax = np.nanmax(calc_data)
        self.vmin, self.vmax = vmin, vmax

        # Choose default colormaps if not provided
        if cmap is None:
            if center is None:
                self.cmap = cm.rocket
            else:
                self.cmap = cm.icefire
        elif isinstance(cmap, str):
            self.cmap = get_colormap(cmap)
        elif isinstance(cmap, list):
            self.cmap = mpl.colors.ListedColormap(cmap)
        else:
            self.cmap = cmap

        # Recenter a divergent colormap
        if center is not None:

            # Copy bad values
            # in mpl<3.2 only masked values are honored with "bad" color spec
            # (see https://github.com/matplotlib/matplotlib/pull/14257)
            bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]

            # under/over values are set for sure when cmap extremes
            # do not map to the same color as +-inf
            under = self.cmap(-np.inf)
            over = self.cmap(np.inf)
            under_set = under != self.cmap(0)
            over_set = over != self.cmap(self.cmap.N - 1)

            # Rebuild the colormap over a window symmetric about `center`.
            # (sic: "normlize" spelling kept as-is.)
            vrange = max(vmax - center, center - vmin)
            normlize = mpl.colors.Normalize(center - vrange, center + vrange)
            cmin, cmax = normlize([vmin, vmax])
            cc = np.linspace(cmin, cmax, 256)
            self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
            self.cmap.set_bad(bad)
            if under_set:
                self.cmap.set_under(under)
            if over_set:
                self.cmap.set_over(over)

    def _annotate_heatmap(self, ax, mesh):
        """Add textual labels with the value in each cell."""
        mesh.update_scalarmappable()
        height, width = self.annot_data.shape
        # Cell centers are at half-integer coordinates.
        xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
        for x, y, m, color, val in zip(xpos.flat, ypos.flat,
                                       mesh.get_array(), mesh.get_facecolors(),
                                       self.annot_data.flat):
            if m is not np.ma.masked:
                # Choose dark/light text for contrast against the cell color.
                lum = relative_luminance(color)
                text_color = ".15" if lum > .408 else "w"
                annotation = ("{:" + self.fmt + "}").format(val)
                text_kwargs = dict(color=text_color, ha="center", va="center")
                text_kwargs.update(self.annot_kws)
                ax.text(x, y, annotation, **text_kwargs)

    def _skip_ticks(self, labels, tickevery):
        """Return ticks and labels at evenly spaced intervals."""
        n = len(labels)
        if tickevery == 0:
            ticks, labels = [], []
        elif tickevery == 1:
            ticks, labels = np.arange(n) + .5, labels
        else:
            start, end, step = 0, n, tickevery
            ticks = np.arange(start, end, step) + .5
            labels = labels[start:end:step]
        return ticks, labels

    def _auto_ticks(self, ax, labels, axis):
        """Determine ticks and ticklabels that minimize overlap."""
        transform = ax.figure.dpi_scale_trans.inverted()
        bbox = ax.get_window_extent().transformed(transform)
        size = [bbox.width, bbox.height][axis]
        axis = [ax.xaxis, ax.yaxis][axis]
        tick, = axis.set_ticks([0])
        fontsize = tick.label1.get_size()
        # How many labels fit given the font size (in inches, via / 72).
        max_ticks = int(size // (fontsize / 72))
        if max_ticks < 1:
            return [], []
        tick_every = len(labels) // max_ticks + 1
        tick_every = 1 if tick_every == 0 else tick_every
        ticks, labels = self._skip_ticks(labels, tick_every)
        return ticks, labels

    def plot(self, ax, cax, kws):
        """Draw the heatmap on the provided Axes."""
        # Remove all the Axes spines
        despine(ax=ax, left=True, bottom=True)

        # setting vmin/vmax in addition to norm is deprecated
        # so avoid setting if norm is set
        if "norm" not in kws:
            kws.setdefault("vmin", self.vmin)
            kws.setdefault("vmax", self.vmax)

        # Draw the heatmap
        mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)

        # Set the axis limits
        ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))

        # Invert the y axis to show the plot in matrix form
        ax.invert_yaxis()

        # Possibly add a colorbar
        if self.cbar:
            cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
            cb.outline.set_linewidth(0)
            # If rasterized is passed to pcolormesh, also rasterize the
            # colorbar to avoid white lines on the PDF rendering
            if kws.get('rasterized', False):
                cb.solids.set_rasterized(True)

        # Add row and column labels
        if isinstance(self.xticks, str) and self.xticks == "auto":
            xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
        else:
            xticks, xticklabels = self.xticks, self.xticklabels

        if isinstance(self.yticks, str) and self.yticks == "auto":
            yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
        else:
            yticks, yticklabels = self.yticks, self.yticklabels

        ax.set(xticks=xticks, yticks=yticks)
        xtl = ax.set_xticklabels(xticklabels)
        ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
        plt.setp(ytl, va="center")  # GH2484

        # Possibly rotate them if they overlap
        # (requires a draw so label extents are known)
        _draw_figure(ax.figure)
        if axis_ticklabels_overlap(xtl):
            plt.setp(xtl, rotation="vertical")
        if axis_ticklabels_overlap(ytl):
            plt.setp(ytl, rotation="horizontal")

        # Add the axis labels
        ax.set(xlabel=self.xlabel, ylabel=self.ylabel)

        # Annotate the cells with the formatted values
        if self.annot:
            self._annotate_heatmap(ax, mesh)
The provided code snippet includes necessary dependencies for implementing the `heatmap` function. Write a Python function `def heatmap( data, *, vmin=None, vmax=None, cmap=None, center=None, robust=False, annot=None, fmt=".2g", annot_kws=None, linewidths=0, linecolor="white", cbar=True, cbar_kws=None, cbar_ax=None, square=False, xticklabels="auto", yticklabels="auto", mask=None, ax=None, **kwargs )` to solve the following problem:
Plot rectangular data as a color-encoded matrix. This is an Axes-level function and will draw the heatmap into the currently-active Axes if none is provided to the ``ax`` argument. Part of this Axes space will be taken and used to plot a colormap, unless ``cbar`` is False or a separate Axes is provided to ``cbar_ax``. Parameters ---------- data : rectangular dataset 2D dataset that can be coerced into an ndarray. If a Pandas DataFrame is provided, the index/column information will be used to label the columns and rows. vmin, vmax : floats, optional Values to anchor the colormap, otherwise they are inferred from the data and other keyword arguments. cmap : matplotlib colormap name or object, or list of colors, optional The mapping from data values to color space. If not provided, the default will depend on whether ``center`` is set. center : float, optional The value at which to center the colormap when plotting divergent data. Using this parameter will change the default ``cmap`` if none is specified. robust : bool, optional If True and ``vmin`` or ``vmax`` are absent, the colormap range is computed with robust quantiles instead of the extreme values. annot : bool or rectangular dataset, optional If True, write the data value in each cell. If an array-like with the same shape as ``data``, then use this to annotate the heatmap instead of the data. Note that DataFrames will match on position, not index. fmt : str, optional String formatting code to use when adding annotations. annot_kws : dict of key, value mappings, optional Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot`` is True. linewidths : float, optional Width of the lines that will divide each cell. linecolor : color, optional Color of the lines that will divide each cell. cbar : bool, optional Whether to draw a colorbar. cbar_kws : dict of key, value mappings, optional Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`. 
cbar_ax : matplotlib Axes, optional Axes in which to draw the colorbar, otherwise take space from the main Axes. square : bool, optional If True, set the Axes aspect to "equal" so each cell will be square-shaped. xticklabels, yticklabels : "auto", bool, list-like, or int, optional If True, plot the column names of the dataframe. If False, don't plot the column names. If list-like, plot these alternate labels as the xticklabels. If an integer, use the column names but plot only every n label. If "auto", try to densely plot non-overlapping labels. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where ``mask`` is True. Cells with missing values are automatically masked. ax : matplotlib Axes, optional Axes in which to draw the plot, otherwise use the currently-active Axes. kwargs : other keyword arguments All other keyword arguments are passed to :meth:`matplotlib.axes.Axes.pcolormesh`. Returns ------- ax : matplotlib Axes Axes object with the heatmap. See Also -------- clustermap : Plot a matrix using hierarchical clustering to arrange the rows and columns. Examples -------- .. include:: ../docstrings/heatmap.rst
Here is the function:
def heatmap(
    data, *,
    vmin=None, vmax=None, cmap=None, center=None, robust=False,
    annot=None, fmt=".2g", annot_kws=None,
    linewidths=0, linecolor="white",
    cbar=True, cbar_kws=None, cbar_ax=None,
    square=False, xticklabels="auto", yticklabels="auto",
    mask=None, ax=None,
    **kwargs
):
    """Plot rectangular data as a color-encoded matrix.

    This is an Axes-level function and will draw the heatmap into the
    currently-active Axes if none is provided to the ``ax`` argument. Part of
    this Axes space will be taken and used to plot a colormap, unless ``cbar``
    is False or a separate Axes is provided to ``cbar_ax``.

    Parameters
    ----------
    data : rectangular dataset
        2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
        is provided, the index/column information will be used to label the
        columns and rows.
    vmin, vmax : floats, optional
        Values to anchor the colormap, otherwise they are inferred from the
        data and other keyword arguments.
    cmap : matplotlib colormap name or object, or list of colors, optional
        The mapping from data values to color space. If not provided, the
        default will depend on whether ``center`` is set.
    center : float, optional
        The value at which to center the colormap when plotting divergent data.
        Using this parameter will change the default ``cmap`` if none is
        specified.
    robust : bool, optional
        If True and ``vmin`` or ``vmax`` are absent, the colormap range is
        computed with robust quantiles instead of the extreme values.
    annot : bool or rectangular dataset, optional
        If True, write the data value in each cell. If an array-like with the
        same shape as ``data``, then use this to annotate the heatmap instead
        of the data. Note that DataFrames will match on position, not index.
    fmt : str, optional
        String formatting code to use when adding annotations.
    annot_kws : dict of key, value mappings, optional
        Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot``
        is True.
    linewidths : float, optional
        Width of the lines that will divide each cell.
    linecolor : color, optional
        Color of the lines that will divide each cell.
    cbar : bool, optional
        Whether to draw a colorbar.
    cbar_kws : dict of key, value mappings, optional
        Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`.
    cbar_ax : matplotlib Axes, optional
        Axes in which to draw the colorbar, otherwise take space from the
        main Axes.
    square : bool, optional
        If True, set the Axes aspect to "equal" so each cell will be
        square-shaped.
    xticklabels, yticklabels : "auto", bool, list-like, or int, optional
        If True, plot the column names of the dataframe. If False, don't plot
        the column names. If list-like, plot these alternate labels as the
        xticklabels. If an integer, use the column names but plot only every
        n label. If "auto", try to densely plot non-overlapping labels.
    mask : bool array or DataFrame, optional
        If passed, data will not be shown in cells where ``mask`` is True.
        Cells with missing values are automatically masked.
    ax : matplotlib Axes, optional
        Axes in which to draw the plot, otherwise use the currently-active
        Axes.
    kwargs : other keyword arguments
        All other keyword arguments are passed to
        :meth:`matplotlib.axes.Axes.pcolormesh`.

    Returns
    -------
    ax : matplotlib Axes
        Axes object with the heatmap.

    See Also
    --------
    clustermap : Plot a matrix using hierarchical clustering to arrange the
                 rows and columns.

    Examples
    --------

    .. include:: ../docstrings/heatmap.rst

    """
    # Route the cell-divider parameters through to pcolormesh.
    kwargs["linewidths"] = linewidths
    kwargs["edgecolor"] = linecolor

    # The plotter owns colormap, tick, label, and annotation logic.
    mapper = _HeatMapper(
        data, vmin, vmax, cmap, center, robust, annot, fmt,
        annot_kws, cbar, cbar_kws, xticklabels, yticklabels, mask,
    )

    # Resolve the target Axes and draw.
    if ax is None:
        ax = plt.gca()
    if square:
        ax.set_aspect("equal")
    mapper.plot(ax, cbar_ax, kwargs)
    return ax
cbar_ax : matplotlib Axes, optional Axes in which to draw the colorbar, otherwise take space from the main Axes. square : bool, optional If True, set the Axes aspect to "equal" so each cell will be square-shaped. xticklabels, yticklabels : "auto", bool, list-like, or int, optional If True, plot the column names of the dataframe. If False, don't plot the column names. If list-like, plot these alternate labels as the xticklabels. If an integer, use the column names but plot only every n label. If "auto", try to densely plot non-overlapping labels. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where ``mask`` is True. Cells with missing values are automatically masked. ax : matplotlib Axes, optional Axes in which to draw the plot, otherwise use the currently-active Axes. kwargs : other keyword arguments All other keyword arguments are passed to :meth:`matplotlib.axes.Axes.pcolormesh`. Returns ------- ax : matplotlib Axes Axes object with the heatmap. See Also -------- clustermap : Plot a matrix using hierarchical clustering to arrange the rows and columns. Examples -------- .. include:: ../docstrings/heatmap.rst |
171,837 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
class _DendrogramPlotter:
    """Object for drawing tree of similarities between data rows/columns"""

    def __init__(self, data, linkage, metric, method, axis, label, rotate):
        """Plot a dendrogram of the relationships between the columns of data

        Parameters
        ----------
        data : pandas.DataFrame
            Rectangular data
        linkage : numpy.ndarray or None
            Precomputed linkage matrix; computed from *data* when None.
        metric, method : str
            Distance metric / linkage method (scipy conventions).
        axis : int
            0 to cluster rows, 1 to cluster columns.
        label : bool
            Whether to label leaves with index/column names.
        rotate : bool
            Whether the dendrogram is drawn rotated (leaves facing right).
        """
        self.axis = axis
        if self.axis == 1:
            # Internally always cluster rows; transpose to cluster columns.
            data = data.T
        if isinstance(data, pd.DataFrame):
            array = data.values
        else:
            array = np.asarray(data)
            data = pd.DataFrame(array)
        self.array = array
        self.data = data
        self.shape = self.data.shape
        self.metric = metric
        self.method = method
        self.label = label
        self.rotate = rotate
        if linkage is None:
            self.linkage = self.calculated_linkage
        else:
            self.linkage = linkage
        self.dendrogram = self.calculate_dendrogram()
        # Dendrogram ends are always at multiples of 5, who knows why
        ticks = 10 * np.arange(self.data.shape[0]) + 5
        if self.label:
            ticklabels = _index_to_ticklabels(self.data.index)
            ticklabels = [ticklabels[i] for i in self.reordered_ind]
            if self.rotate:
                self.xticks = []
                self.yticks = ticks
                self.xticklabels = []
                self.yticklabels = ticklabels
                self.ylabel = _index_to_label(self.data.index)
                self.xlabel = ''
            else:
                self.xticks = ticks
                self.yticks = []
                self.xticklabels = ticklabels
                self.yticklabels = []
                self.ylabel = ''
                self.xlabel = _index_to_label(self.data.index)
        else:
            self.xticks, self.yticks = [], []
            self.yticklabels, self.xticklabels = [], []
            self.xlabel, self.ylabel = '', ''
        self.dependent_coord = self.dendrogram['dcoord']
        self.independent_coord = self.dendrogram['icoord']

    def _calculate_linkage_scipy(self):
        """Compute the linkage matrix with scipy (always available fallback)."""
        linkage = hierarchy.linkage(self.array, method=self.method,
                                    metric=self.metric)
        return linkage

    def _calculate_linkage_fastcluster(self):
        """Compute the linkage matrix with fastcluster, if installed."""
        import fastcluster
        # Fastcluster has a memory-saving vectorized version, but only
        # with certain linkage methods, and mostly with euclidean metric
        # vector_methods = ('single', 'centroid', 'median', 'ward')
        euclidean_methods = ('centroid', 'median', 'ward')
        euclidean = self.metric == 'euclidean' and self.method in \
            euclidean_methods
        if euclidean or self.method == 'single':
            return fastcluster.linkage_vector(self.array,
                                              method=self.method,
                                              metric=self.metric)
        else:
            linkage = fastcluster.linkage(self.array, method=self.method,
                                          metric=self.metric)
            return linkage

    @property
    def calculated_linkage(self):
        # Property: __init__ reads this as an attribute (self.calculated_linkage).
        # Prefer fastcluster; fall back to scipy with a performance hint on
        # large inputs.
        try:
            return self._calculate_linkage_fastcluster()
        except ImportError:
            # np.product was removed in NumPy 2.0; np.prod is the replacement.
            if np.prod(self.shape) >= 10000:
                msg = ("Clustering large matrix with scipy. Installing "
                       "`fastcluster` may give better performance.")
                warnings.warn(msg)
        return self._calculate_linkage_scipy()

    def calculate_dendrogram(self):
        """Calculates a dendrogram based on the linkage matrix

        Made a separate function, not a property because don't want to
        recalculate the dendrogram every time it is accessed.

        Returns
        -------
        dendrogram : dict
            Dendrogram dictionary as returned by scipy.cluster.hierarchy
            .dendrogram. The important key-value pairing is
            "reordered_ind" which indicates the re-ordering of the matrix
        """
        return hierarchy.dendrogram(self.linkage, no_plot=True,
                                    color_threshold=-np.inf)

    @property
    def reordered_ind(self):
        """Indices of the matrix, reordered by the dendrogram"""
        # Property: callers (including plot()) read this as an attribute.
        return self.dendrogram['leaves']

    def plot(self, ax, tree_kws):
        """Plots a dendrogram of the similarities between data on the axes

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            Axes object upon which the dendrogram is plotted
        tree_kws : dict or None
            Keyword arguments for the LineCollection drawing the tree.
        """
        tree_kws = {} if tree_kws is None else tree_kws.copy()
        tree_kws.setdefault("linewidths", .5)
        # LineCollection uses plural kwargs; accept singular "color" alias.
        tree_kws.setdefault("colors", tree_kws.pop("color", (.2, .2, .2)))
        if self.rotate and self.axis == 0:
            coords = zip(self.dependent_coord, self.independent_coord)
        else:
            coords = zip(self.independent_coord, self.dependent_coord)
        lines = LineCollection([list(zip(x, y)) for x, y in coords],
                               **tree_kws)
        ax.add_collection(lines)
        number_of_leaves = len(self.reordered_ind)
        max_dependent_coord = max(map(max, self.dependent_coord))
        if self.rotate:
            ax.yaxis.set_ticks_position('right')
            # Constants 10 and 1.05 come from
            # `scipy.cluster.hierarchy._plot_dendrogram`
            ax.set_ylim(0, number_of_leaves * 10)
            ax.set_xlim(0, max_dependent_coord * 1.05)
            ax.invert_xaxis()
            ax.invert_yaxis()
        else:
            # Constants 10 and 1.05 come from
            # `scipy.cluster.hierarchy._plot_dendrogram`
            ax.set_xlim(0, number_of_leaves * 10)
            ax.set_ylim(0, max_dependent_coord * 1.05)
        despine(ax=ax, bottom=True, left=True)
        ax.set(xticks=self.xticks, yticks=self.yticks,
               xlabel=self.xlabel, ylabel=self.ylabel)
        xtl = ax.set_xticklabels(self.xticklabels)
        ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
        # Force a draw of the plot to avoid matplotlib window error
        _draw_figure(ax.figure)
        # Fall back to a non-overlapping label orientation if needed.
        if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
            plt.setp(ytl, rotation="horizontal")
        if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
            plt.setp(xtl, rotation="vertical")
        return self
The provided code snippet includes necessary dependencies for implementing the `dendrogram` function. Write a Python function `def dendrogram( data, *, linkage=None, axis=1, label=True, metric='euclidean', method='average', rotate=False, tree_kws=None, ax=None )` to solve the following problem:
Draw a tree diagram of relationships within a matrix Parameters ---------- data : pandas.DataFrame Rectangular data linkage : numpy.array, optional Linkage matrix axis : int, optional Which axis to use to calculate linkage. 0 is rows, 1 is columns. label : bool, optional If True, label the dendrogram at leaves with column or row names metric : str, optional Distance metric. Anything valid for scipy.spatial.distance.pdist method : str, optional Linkage method to use. Anything valid for scipy.cluster.hierarchy.linkage rotate : bool, optional When plotting the matrix, whether to rotate it 90 degrees counter-clockwise, so the leaves face right tree_kws : dict, optional Keyword arguments for the ``matplotlib.collections.LineCollection`` that is used for plotting the lines of the dendrogram tree. ax : matplotlib axis, optional Axis to plot on, otherwise uses current axis Returns ------- dendrogramplotter : _DendrogramPlotter A Dendrogram plotter object. Notes ----- Access the reordered dendrogram indices with dendrogramplotter.reordered_ind
Here is the function:
def dendrogram(
    data, *,
    linkage=None, axis=1, label=True, metric='euclidean',
    method='average', rotate=False, tree_kws=None, ax=None
):
    """Draw a tree diagram of relationships within a matrix.

    Parameters
    ----------
    data : pandas.DataFrame
        Rectangular data to cluster.
    linkage : numpy.array, optional
        Precomputed linkage matrix; computed from the data when omitted.
    axis : int, optional
        Axis used to compute the linkage: 0 clusters rows, 1 clusters columns.
    label : bool, optional
        When True, leaves are labeled with the row or column names.
    metric : str, optional
        Any distance metric accepted by scipy.spatial.distance.pdist.
    method : str, optional
        Any linkage method accepted by scipy.cluster.hierarchy.linkage.
    rotate : bool, optional
        When True, the tree is rotated 90 degrees counter-clockwise so
        the leaves face right.
    tree_kws : dict, optional
        Passed to the ``matplotlib.collections.LineCollection`` that draws
        the tree's lines.
    ax : matplotlib axis, optional
        Axis to draw on; defaults to the current axis.

    Returns
    -------
    dendrogramplotter : _DendrogramPlotter
        Plotter object; its ``reordered_ind`` attribute holds the
        leaf ordering produced by the clustering.
    """
    if _no_scipy:
        raise RuntimeError("dendrogram requires scipy to be installed")
    plotter = _DendrogramPlotter(
        data,
        linkage=linkage,
        axis=axis,
        metric=metric,
        method=method,
        label=label,
        rotate=rotate,
    )
    target_ax = plt.gca() if ax is None else ax
    return plotter.plot(ax=target_ax, tree_kws=tree_kws)
171,838 | import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from . import cm
from .axisgrid import Grid
from ._compat import get_colormap
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
class ClusterGrid(Grid):
    """Grid that lays out dendrograms, color annotations and the heatmap
    for a hierarchically-clustered matrix plot."""

    def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
                 figsize=None, row_colors=None, col_colors=None, mask=None,
                 dendrogram_ratio=None, colors_ratio=None, cbar_pos=None):
        """Grid object for organizing clustered heatmap input on to axes"""
        if _no_scipy:
            raise RuntimeError("ClusterGrid requires scipy to be available")
        if isinstance(data, pd.DataFrame):
            self.data = data
        else:
            self.data = pd.DataFrame(data)
        self.data2d = self.format_data(self.data, pivot_kws, z_score,
                                       standard_scale)
        self.mask = _matrix_mask(self.data2d, mask)
        self._figure = plt.figure(figsize=figsize)
        self.row_colors, self.row_color_labels = \
            self._preprocess_colors(data, row_colors, axis=0)
        self.col_colors, self.col_color_labels = \
            self._preprocess_colors(data, col_colors, axis=1)
        # A scalar ratio applies to both margins; a pair is (row, col).
        try:
            row_dendrogram_ratio, col_dendrogram_ratio = dendrogram_ratio
        except TypeError:
            row_dendrogram_ratio = col_dendrogram_ratio = dendrogram_ratio
        try:
            row_colors_ratio, col_colors_ratio = colors_ratio
        except TypeError:
            row_colors_ratio = col_colors_ratio = colors_ratio
        width_ratios = self.dim_ratios(self.row_colors,
                                       row_dendrogram_ratio,
                                       row_colors_ratio)
        height_ratios = self.dim_ratios(self.col_colors,
                                        col_dendrogram_ratio,
                                        col_colors_ratio)
        # Extra row/column only when color annotations are present.
        nrows = 2 if self.col_colors is None else 3
        ncols = 2 if self.row_colors is None else 3
        self.gs = gridspec.GridSpec(nrows, ncols,
                                    width_ratios=width_ratios,
                                    height_ratios=height_ratios)
        self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])
        self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])
        self.ax_row_dendrogram.set_axis_off()
        self.ax_col_dendrogram.set_axis_off()
        self.ax_row_colors = None
        self.ax_col_colors = None
        if self.row_colors is not None:
            self.ax_row_colors = self._figure.add_subplot(
                self.gs[-1, 1])
        if self.col_colors is not None:
            self.ax_col_colors = self._figure.add_subplot(
                self.gs[1, -1])
        self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])
        if cbar_pos is None:
            self.ax_cbar = self.cax = None
        else:
            # Initialize the colorbar axes in the gridspec so that tight_layout
            # works. We will move it where it belongs later. This is a hack.
            self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])
            self.cax = self.ax_cbar  # Backwards compatibility
        self.cbar_pos = cbar_pos
        self.dendrogram_row = None
        self.dendrogram_col = None

    def _preprocess_colors(self, data, colors, axis):
        """Preprocess {row/col}_colors to extract labels and convert colors."""
        labels = None
        if colors is not None:
            if isinstance(colors, (pd.DataFrame, pd.Series)):
                # If data is unindexed, raise
                if (not hasattr(data, "index") and axis == 0) or (
                    not hasattr(data, "columns") and axis == 1
                ):
                    axis_name = "col" if axis else "row"
                    msg = (f"{axis_name}_colors indices can't be matched with data "
                           f"indices. Provide {axis_name}_colors as a non-indexed "
                           "datatype, e.g. by using `.to_numpy()``")
                    raise TypeError(msg)
                # Ensure colors match data indices
                if axis == 0:
                    colors = colors.reindex(data.index)
                else:
                    colors = colors.reindex(data.columns)
                # Replace na's with white color
                # TODO We should set these to transparent instead
                colors = colors.astype(object).fillna('white')
                # Extract color values and labels from frame/series
                if isinstance(colors, pd.DataFrame):
                    labels = list(colors.columns)
                    colors = colors.T.values
                else:
                    if colors.name is None:
                        labels = [""]
                    else:
                        labels = [colors.name]
                    colors = colors.values
            colors = _convert_colors(colors)
        return colors, labels

    def format_data(self, data, pivot_kws, z_score=None,
                    standard_scale=None):
        """Extract variables from data or use directly."""
        # Either the data is already in 2d matrix format, or need to do a pivot
        if pivot_kws is not None:
            data2d = data.pivot(**pivot_kws)
        else:
            data2d = data
        # The two normalizations are mutually exclusive.
        if z_score is not None and standard_scale is not None:
            raise ValueError(
                'Cannot perform both z-scoring and standard-scaling on data')
        if z_score is not None:
            data2d = self.z_score(data2d, z_score)
        if standard_scale is not None:
            data2d = self.standard_scale(data2d, standard_scale)
        return data2d

    @staticmethod
    def z_score(data2d, axis=1):
        """Standardize the mean and variance of the data axis

        Parameters
        ----------
        data2d : pandas.DataFrame
            Data to normalize
        axis : int
            Which axis to normalize across. If 0, normalize across rows, if 1,
            normalize across columns.

        Returns
        -------
        normalized : pandas.DataFrame
            Normalized data with a mean of 0 and variance of 1 across the
            specified axis.
        """
        # Staticmethod: called as self.z_score(data2d, axis) without an
        # instance-bound first argument.
        if axis == 1:
            z_scored = data2d
        else:
            z_scored = data2d.T
        z_scored = (z_scored - z_scored.mean()) / z_scored.std()
        if axis == 1:
            return z_scored
        else:
            return z_scored.T

    @staticmethod
    def standard_scale(data2d, axis=1):
        """Divide the data by the difference between the max and min

        Parameters
        ----------
        data2d : pandas.DataFrame
            Data to normalize
        axis : int
            Which axis to normalize across. If 0, normalize across rows, if 1,
            normalize across columns.

        Returns
        -------
        standardized : pandas.DataFrame
            Standardized data with values scaled into the 0 to 1 range along
            the specified axis.
        """
        # Normalize these values to range from 0 to 1
        if axis == 1:
            standardized = data2d
        else:
            standardized = data2d.T
        subtract = standardized.min()
        standardized = (standardized - subtract) / (
            standardized.max() - standardized.min())
        if axis == 1:
            return standardized
        else:
            return standardized.T

    def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):
        """Get the proportions of the figure taken up by each axes."""
        ratios = [dendrogram_ratio]
        if colors is not None:
            # Colors are encoded as rgb, so there is an extra dimension
            if np.ndim(colors) > 2:
                n_colors = len(colors)
            else:
                n_colors = 1
            ratios += [n_colors * colors_ratio]
        # Add the ratio for the heatmap itself
        ratios.append(1 - sum(ratios))
        return ratios

    @staticmethod
    def color_list_to_matrix_and_cmap(colors, ind, axis=0):
        """Turns a list of colors into a numpy matrix and matplotlib colormap

        These arguments can now be plotted using heatmap(matrix, cmap)
        and the provided colors will be plotted.

        Parameters
        ----------
        colors : list of matplotlib colors
            Colors to label the rows or columns of a dataframe.
        ind : list of ints
            Ordering of the rows or columns, to reorder the original colors
            by the clustered dendrogram order
        axis : int
            Which axis this is labeling

        Returns
        -------
        matrix : numpy.array
            A numpy array of integer values, where each indexes into the cmap
        cmap : matplotlib.colors.ListedColormap
        """
        try:
            mpl.colors.to_rgb(colors[0])
        except ValueError:
            # We have a 2D color structure
            m, n = len(colors), len(colors[0])
            if not all(len(c) == n for c in colors[1:]):
                raise ValueError("Multiple side color vectors must have same size")
        else:
            # We have one vector of colors
            m, n = 1, len(colors)
            colors = [colors]
        # Map from unique colors to colormap index value
        unique_colors = {}
        matrix = np.zeros((m, n), int)
        for i, inner in enumerate(colors):
            for j, color in enumerate(inner):
                idx = unique_colors.setdefault(color, len(unique_colors))
                matrix[i, j] = idx
        # Reorder for clustering and transpose for axis
        matrix = matrix[:, ind]
        if axis == 0:
            matrix = matrix.T
        cmap = mpl.colors.ListedColormap(list(unique_colors))
        return matrix, cmap

    def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
                         row_linkage, col_linkage, tree_kws):
        """Draw the row and/or column dendrograms onto their margin axes."""
        # Plot the row dendrogram
        if row_cluster:
            self.dendrogram_row = dendrogram(
                self.data2d, metric=metric, method=method, label=False, axis=0,
                ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,
                tree_kws=tree_kws
            )
        else:
            self.ax_row_dendrogram.set_xticks([])
            self.ax_row_dendrogram.set_yticks([])
        # Plot the column dendrogram
        if col_cluster:
            self.dendrogram_col = dendrogram(
                self.data2d, metric=metric, method=method, label=False,
                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,
                tree_kws=tree_kws
            )
        else:
            self.ax_col_dendrogram.set_xticks([])
            self.ax_col_dendrogram.set_yticks([])
        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)

    def plot_colors(self, xind, yind, **kws):
        """Plots color labels between the dendrogram and the heatmap

        Parameters
        ----------
        heatmap_kws : dict
            Keyword arguments heatmap
        """
        # Remove any custom colormap and centering
        # TODO this code has consistently caused problems when we
        # have missed kwargs that need to be excluded that it might
        # be better to rewrite *in*clusively.
        kws = kws.copy()
        kws.pop('cmap', None)
        kws.pop('norm', None)
        kws.pop('center', None)
        kws.pop('annot', None)
        kws.pop('vmin', None)
        kws.pop('vmax', None)
        kws.pop('robust', None)
        kws.pop('xticklabels', None)
        kws.pop('yticklabels', None)
        # Plot the row colors
        if self.row_colors is not None:
            matrix, cmap = self.color_list_to_matrix_and_cmap(
                self.row_colors, yind, axis=0)
            # Get row_color labels
            if self.row_color_labels is not None:
                row_color_labels = self.row_color_labels
            else:
                row_color_labels = False
            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
                    xticklabels=row_color_labels, yticklabels=False, **kws)
            # Adjust rotation of labels
            if row_color_labels is not False:
                plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)
        else:
            despine(self.ax_row_colors, left=True, bottom=True)
        # Plot the column colors
        if self.col_colors is not None:
            matrix, cmap = self.color_list_to_matrix_and_cmap(
                self.col_colors, xind, axis=1)
            # Get col_color labels
            if self.col_color_labels is not None:
                col_color_labels = self.col_color_labels
            else:
                col_color_labels = False
            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
                    xticklabels=False, yticklabels=col_color_labels, **kws)
            # Adjust rotation of labels, place on right side
            if col_color_labels is not False:
                self.ax_col_colors.yaxis.tick_right()
                plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)
        else:
            despine(self.ax_col_colors, left=True, bottom=True)

    def plot_matrix(self, colorbar_kws, xind, yind, **kws):
        """Draw the central heatmap with rows/columns in clustered order."""
        self.data2d = self.data2d.iloc[yind, xind]
        self.mask = self.mask.iloc[yind, xind]
        # Try to reorganize specified tick labels, if provided
        xtl = kws.pop("xticklabels", "auto")
        try:
            xtl = np.asarray(xtl)[xind]
        except (TypeError, IndexError):
            pass
        ytl = kws.pop("yticklabels", "auto")
        try:
            ytl = np.asarray(ytl)[yind]
        except (TypeError, IndexError):
            pass
        # Reorganize the annotations to match the heatmap
        annot = kws.pop("annot", None)
        if annot is None or annot is False:
            pass
        else:
            if isinstance(annot, bool):
                annot_data = self.data2d
            else:
                annot_data = np.asarray(annot)
                if annot_data.shape != self.data2d.shape:
                    err = "`data` and `annot` must have same shape."
                    raise ValueError(err)
                annot_data = annot_data[yind][:, xind]
            annot = annot_data
        # Setting ax_cbar=None in clustermap call implies no colorbar
        kws.setdefault("cbar", self.ax_cbar is not None)
        heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,
                cbar_kws=colorbar_kws, mask=self.mask,
                xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)
        ytl = self.ax_heatmap.get_yticklabels()
        ytl_rot = None if not ytl else ytl[0].get_rotation()
        self.ax_heatmap.yaxis.set_ticks_position('right')
        self.ax_heatmap.yaxis.set_label_position('right')
        if ytl_rot is not None:
            # Preserve the label rotation after moving ticks to the right.
            ytl = self.ax_heatmap.get_yticklabels()
            plt.setp(ytl, rotation=ytl_rot)
        tight_params = dict(h_pad=.02, w_pad=.02)
        if self.ax_cbar is None:
            self._figure.tight_layout(**tight_params)
        else:
            # Turn the colorbar axes off for tight layout so that its
            # ticks don't interfere with the rest of the plot layout.
            # Then move it.
            self.ax_cbar.set_axis_off()
            self._figure.tight_layout(**tight_params)
            self.ax_cbar.set_axis_on()
            self.ax_cbar.set_position(self.cbar_pos)

    def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
             row_linkage, col_linkage, tree_kws, **kws):
        """Run the full pipeline: dendrograms, color strips, then heatmap."""
        # heatmap square=True sets the aspect ratio on the axes, but that is
        # not compatible with the multi-axes layout of clustergrid
        if kws.get("square", False):
            msg = "``square=True`` ignored in clustermap"
            warnings.warn(msg)
            kws.pop("square")
        colorbar_kws = {} if colorbar_kws is None else colorbar_kws
        self.plot_dendrograms(row_cluster, col_cluster, metric, method,
                              row_linkage=row_linkage, col_linkage=col_linkage,
                              tree_kws=tree_kws)
        # When clustering is disabled on an axis, dendrogram_* is None and
        # the original order is kept.
        try:
            xind = self.dendrogram_col.reordered_ind
        except AttributeError:
            xind = np.arange(self.data2d.shape[1])
        try:
            yind = self.dendrogram_row.reordered_ind
        except AttributeError:
            yind = np.arange(self.data2d.shape[0])
        self.plot_colors(xind, yind, **kws)
        self.plot_matrix(colorbar_kws, xind, yind, **kws)
        return self
The provided code snippet includes necessary dependencies for implementing the `clustermap` function. Write a Python function `def clustermap( data, *, pivot_kws=None, method='average', metric='euclidean', z_score=None, standard_scale=None, figsize=(10, 10), cbar_kws=None, row_cluster=True, col_cluster=True, row_linkage=None, col_linkage=None, row_colors=None, col_colors=None, mask=None, dendrogram_ratio=.2, colors_ratio=0.03, cbar_pos=(.02, .8, .05, .18), tree_kws=None, **kwargs )` to solve the following problem:
Plot a matrix dataset as a hierarchically-clustered heatmap. This function requires scipy to be available. Parameters ---------- data : 2D array-like Rectangular data for clustering. Cannot contain NAs. pivot_kws : dict, optional If `data` is a tidy dataframe, can provide keyword arguments for pivot to create a rectangular dataframe. method : str, optional Linkage method to use for calculating clusters. See :func:`scipy.cluster.hierarchy.linkage` documentation for more information. metric : str, optional Distance metric to use for the data. See :func:`scipy.spatial.distance.pdist` documentation for more options. To use different metrics (or methods) for rows and columns, you may construct each linkage matrix yourself and provide them as `{row,col}_linkage`. z_score : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores for the rows or the columns. Z scores are: z = (x - mean)/std, so values in each row (column) will get the mean of the row (column) subtracted, then divided by the standard deviation of the row (column). This ensures that each row (column) has mean of 0 and variance of 1. standard_scale : int or None, optional Either 0 (rows) or 1 (columns). Whether or not to standardize that dimension, meaning for each row or column, subtract the minimum and divide each by its maximum. figsize : tuple of (width, height), optional Overall size of the figure. cbar_kws : dict, optional Keyword arguments to pass to `cbar_kws` in :func:`heatmap`, e.g. to add a label to the colorbar. {row,col}_cluster : bool, optional If ``True``, cluster the {rows, columns}. {row,col}_linkage : :class:`numpy.ndarray`, optional Precomputed linkage matrix for the rows or columns. See :func:`scipy.cluster.hierarchy.linkage` for specific formats. {row,col}_colors : list-like or pandas DataFrame/Series, optional List of colors to label for either the rows or columns. Useful to evaluate whether samples within a group are clustered together. 
Can use nested lists or DataFrame for multiple color levels of labeling. If given as a :class:`pandas.DataFrame` or :class:`pandas.Series`, labels for the colors are extracted from the DataFrames column names or from the name of the Series. DataFrame/Series colors are also matched to the data by their index, ensuring colors are drawn in the correct order. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where `mask` is True. Cells with missing values are automatically masked. Only used for visualizing, not for calculating. {dendrogram,colors}_ratio : float, or pair of floats, optional Proportion of the figure size devoted to the two marginal elements. If a pair is given, they correspond to (row, col) ratios. cbar_pos : tuple of (left, bottom, width, height), optional Position of the colorbar axes in the figure. Setting to ``None`` will disable the colorbar. tree_kws : dict, optional Parameters for the :class:`matplotlib.collections.LineCollection` that is used to plot the lines of the dendrogram tree. kwargs : other keyword arguments All other keyword arguments are passed to :func:`heatmap`. Returns ------- :class:`ClusterGrid` A :class:`ClusterGrid` instance. See Also -------- heatmap : Plot rectangular data as a color-encoded matrix. Notes ----- The returned object has a ``savefig`` method that should be used if you want to save the figure object without clipping the dendrograms. To access the reordered row indices, use: ``clustergrid.dendrogram_row.reordered_ind`` Column indices, use: ``clustergrid.dendrogram_col.reordered_ind`` Examples -------- .. include:: ../docstrings/clustermap.rst
Here is the function:
def clustermap(
    data, *,
    pivot_kws=None, method='average', metric='euclidean',
    z_score=None, standard_scale=None, figsize=(10, 10),
    cbar_kws=None, row_cluster=True, col_cluster=True,
    row_linkage=None, col_linkage=None,
    row_colors=None, col_colors=None, mask=None,
    dendrogram_ratio=.2, colors_ratio=0.03,
    cbar_pos=(.02, .8, .05, .18), tree_kws=None,
    **kwargs
):
    """
    Plot a matrix dataset as a hierarchically-clustered heatmap.

    Requires scipy. The matrix rows and/or columns are reordered by
    hierarchical clustering, with the resulting dendrograms drawn in the
    figure margins alongside the heatmap.

    Parameters
    ----------
    data : 2D array-like
        Rectangular data to cluster; may not contain NAs.
    pivot_kws : dict, optional
        When `data` is tidy, keyword arguments for a pivot that produces
        a rectangular frame.
    method : str, optional
        Linkage method (see :func:`scipy.cluster.hierarchy.linkage`).
    metric : str, optional
        Distance metric (see :func:`scipy.spatial.distance.pdist`). For
        per-axis metrics or methods, precompute and pass `{row,col}_linkage`.
    z_score : int or None, optional
        0 (rows) or 1 (columns): z-score normalize that dimension so each
        row/column has mean 0 and variance 1.
    standard_scale : int or None, optional
        0 (rows) or 1 (columns): rescale that dimension to the 0-1 range.
    figsize : tuple of (width, height), optional
        Overall figure size.
    cbar_kws : dict, optional
        Forwarded as `cbar_kws` to :func:`heatmap` (e.g. a colorbar label).
    {row,col}_cluster : bool, optional
        Whether to cluster the rows / columns.
    {row,col}_linkage : :class:`numpy.ndarray`, optional
        Precomputed linkage matrices.
    {row,col}_colors : list-like or pandas DataFrame/Series, optional
        Color annotations for rows/columns; nested lists or DataFrames give
        multiple levels. Pandas inputs contribute labels and are aligned to
        the data by index.
    mask : bool array or DataFrame, optional
        Cells where `mask` is True are hidden (display only; clustering is
        unaffected). Missing values are masked automatically.
    {dendrogram,colors}_ratio : float, or pair of floats, optional
        Fraction of the figure devoted to each marginal element; a pair is
        interpreted as (row, col).
    cbar_pos : tuple of (left, bottom, width, height), optional
        Colorbar axes position; ``None`` disables the colorbar.
    tree_kws : dict, optional
        Parameters for the dendrogram's
        :class:`matplotlib.collections.LineCollection`.
    kwargs : other keyword arguments
        Forwarded to :func:`heatmap`.

    Returns
    -------
    :class:`ClusterGrid`
        The grid object. Use its ``savefig`` method to save without
        clipping the dendrograms; the clustered orders are available as
        ``clustergrid.dendrogram_row.reordered_ind`` and
        ``clustergrid.dendrogram_col.reordered_ind``.

    See Also
    --------
    heatmap : Plot rectangular data as a color-encoded matrix.

    Examples
    --------
    .. include:: ../docstrings/clustermap.rst
    """
    if _no_scipy:
        raise RuntimeError("clustermap requires scipy to be available")
    grid = ClusterGrid(
        data,
        pivot_kws=pivot_kws,
        figsize=figsize,
        row_colors=row_colors,
        col_colors=col_colors,
        z_score=z_score,
        standard_scale=standard_scale,
        mask=mask,
        dendrogram_ratio=dendrogram_ratio,
        colors_ratio=colors_ratio,
        cbar_pos=cbar_pos,
    )
    return grid.plot(
        metric=metric,
        method=method,
        colorbar_kws=cbar_kws,
        row_cluster=row_cluster,
        col_cluster=col_cluster,
        row_linkage=row_linkage,
        col_linkage=col_linkage,
        tree_kws=tree_kws,
        **kwargs,
    )
Useful to evaluate whether samples within a group are clustered together. Can use nested lists or DataFrame for multiple color levels of labeling. If given as a :class:`pandas.DataFrame` or :class:`pandas.Series`, labels for the colors are extracted from the DataFrames column names or from the name of the Series. DataFrame/Series colors are also matched to the data by their index, ensuring colors are drawn in the correct order. mask : bool array or DataFrame, optional If passed, data will not be shown in cells where `mask` is True. Cells with missing values are automatically masked. Only used for visualizing, not for calculating. {dendrogram,colors}_ratio : float, or pair of floats, optional Proportion of the figure size devoted to the two marginal elements. If a pair is given, they correspond to (row, col) ratios. cbar_pos : tuple of (left, bottom, width, height), optional Position of the colorbar axes in the figure. Setting to ``None`` will disable the colorbar. tree_kws : dict, optional Parameters for the :class:`matplotlib.collections.LineCollection` that is used to plot the lines of the dendrogram tree. kwargs : other keyword arguments All other keyword arguments are passed to :func:`heatmap`. Returns ------- :class:`ClusterGrid` A :class:`ClusterGrid` instance. See Also -------- heatmap : Plot rectangular data as a color-encoded matrix. Notes ----- The returned object has a ``savefig`` method that should be used if you want to save the figure object without clipping the dendrograms. To access the reordered row indices, use: ``clustergrid.dendrogram_row.reordered_ind`` Column indices, use: ``clustergrid.dendrogram_col.reordered_ind`` Examples -------- .. include:: ../docstrings/clustermap.rst |
171,839 | import colorsys
from itertools import cycle
import numpy as np
import matplotlib as mpl
from .external import husl
from .utils import desaturate, get_color_cycle
from .colors import xkcd_rgb, crayons
from ._compat import get_colormap
class Image:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
_close_exclusive_fp_after_loading = True
    def __init__(self):
        """Initialize an empty image shell; use the module factory functions instead."""
        # FIXME: take "new" parameters / other image?
        # FIXME: turn mode and size into delegating properties?
        self.im = None          # core C image object (None until allocated)
        self.mode = ""          # band/layout descriptor, e.g. "RGB", "L", "P"
        self._size = (0, 0)     # (width, height) in pixels
        self.palette = None     # ImagePalette for palette ("P"/"PA") modes
        self.info = {}          # free-form metadata (e.g. "transparency", "exif")
        self._category = 0      # deprecated category flag (served by __getattr__)
        self.readonly = 0       # non-zero => copy-on-write via _ensure_mutable()
        self.pyaccess = None    # optional CFFI pixel accessor (set up in load())
        self._exif = None       # lazily-built Exif instance (see getexif())
def __getattr__(self, name):
if name == "category":
deprecate("Image categories", 10, "is_animated", plural=True)
return self._category
raise AttributeError(name)
def width(self):
return self.size[0]
def height(self):
return self.size[1]
def size(self):
return self._size
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new._size = im.size
if im.mode in ("P", "PA"):
if self.palette:
new.palette = self.palette.copy()
else:
from . import ImagePalette
new.palette = ImagePalette.ImagePalette()
new.info = self.info.copy()
return new
    # Context manager support
    def __enter__(self):
        # Nothing to acquire; the image itself is the managed resource.
        return self
    def __exit__(self, *args):
        # Only close file pointers this image opened itself (_exclusive_fp);
        # externally supplied file objects belong to the caller.
        if hasattr(self, "fp") and getattr(self, "_exclusive_fp", False):
            if getattr(self, "_fp", False):
                if self._fp != self.fp:
                    self._fp.close()
                # Replace with a deferred error so later use of _fp fails
                # with a clear "operation on closed image" message.
                self._fp = DeferredError(ValueError("Operation on closed image"))
            if self.fp:
                self.fp.close()
            self.fp = None
    def close(self):
        """
        Closes the file pointer, if possible.
        This operation will destroy the image core and release its memory.
        The image data will be unusable afterward.
        This function is required to close images that have multiple frames or
        have not had their file read and closed by the
        :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
        more information.
        """
        try:
            # Mirror __exit__: close both the original (_fp) and current fp.
            if getattr(self, "_fp", False):
                if self._fp != self.fp:
                    self._fp.close()
                self._fp = DeferredError(ValueError("Operation on closed image"))
            if self.fp:
                self.fp.close()
            self.fp = None
        except Exception as msg:
            # Best-effort: a failed close should not propagate.
            logger.debug("Error closing: %s", msg)
        if getattr(self, "map", None):
            self.map = None
        # Instead of simply setting to None, we're setting up a
        # deferred error that will better explain that the core image
        # object is gone.
        self.im = DeferredError(ValueError("Operation on closed image"))
    def _copy(self):
        # Give ourselves a private copy of the core image so in-place edits
        # cannot affect whoever shared the original buffer.
        self.load()
        self.im = self.im.copy()
        self.pyaccess = None  # stale accessor pointed at the old core object
        self.readonly = 0
def _ensure_mutable(self):
if self.readonly:
self._copy()
else:
self.load()
def _dump(self, file=None, format=None, **options):
suffix = ""
if format:
suffix = "." + format
if not file:
f, filename = tempfile.mkstemp(suffix)
os.close(f)
else:
filename = file
if not filename.endswith(suffix):
filename = filename + suffix
self.load()
if not format or format == "PPM":
self.im.save_ppm(filename)
else:
self.save(filename, format, **options)
return filename
def __eq__(self, other):
return (
self.__class__ is other.__class__
and self.mode == other.mode
and self.size == other.size
and self.info == other.info
and self._category == other._category
and self.getpalette() == other.getpalette()
and self.tobytes() == other.tobytes()
)
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__,
self.__class__.__name__,
self.mode,
self.size[0],
self.size[1],
id(self),
)
def _repr_pretty_(self, p, cycle):
"""IPython plain text display support"""
# Same as __repr__ but without unpredictable id(self),
# to keep Jupyter notebook `text/plain` output stable.
p.text(
"<%s.%s image mode=%s size=%dx%d>"
% (
self.__class__.__module__,
self.__class__.__name__,
self.mode,
self.size[0],
self.size[1],
)
)
def _repr_png_(self):
"""iPython display hook support
:returns: png version of the image as bytes
"""
b = io.BytesIO()
try:
self.save(b, "PNG")
except Exception as e:
msg = "Could not save to PNG for display"
raise ValueError(msg) from e
return b.getvalue()
def __array_interface__(self):
# numpy array interface support
new = {"version": 3}
try:
if self.mode == "1":
# Binary images need to be extended from bits to bytes
# See: https://github.com/python-pillow/Pillow/issues/350
new["data"] = self.tobytes("raw", "L")
else:
new["data"] = self.tobytes()
except Exception as e:
if not isinstance(e, (MemoryError, RecursionError)):
try:
import numpy
from packaging.version import parse as parse_version
except ImportError:
pass
else:
if parse_version(numpy.__version__) < parse_version("1.23"):
warnings.warn(e)
raise
new["shape"], new["typestr"] = _conv_type_shape(self)
return new
    def __getstate__(self):
        # Pickle support: serialize as [info, mode, size, palette, raw bytes];
        # __setstate__ consumes the list in the same order.
        return [self.info, self.mode, self.size, self.getpalette(), self.tobytes()]
    def __setstate__(self, state):
        # Pickle support: rebuild from the list produced by __getstate__.
        Image.__init__(self)
        info, mode, size, palette, data = state
        self.info = info
        self.mode = mode
        self._size = size
        self.im = core.new(mode, size)
        # Only palette-capable modes carry a palette worth restoring.
        if mode in ("L", "LA", "P", "PA") and palette:
            self.putpalette(palette)
        self.frombytes(data)
    def tobytes(self, encoder_name="raw", *args):
        """
        Return image as a bytes object.
        .. warning::
            This method returns the raw image data from the internal
            storage. For compressed image data (e.g. PNG, JPEG) use
            :meth:`~.save`, with a BytesIO parameter for in-memory
            data.
        :param encoder_name: What encoder to use. The default is to
                             use the standard "raw" encoder.
                             A list of C encoders can be seen under
                             codecs section of the function array in
                             :file:`_imaging.c`. Python encoders are
                             registered within the relevant plugins.
        :param args: Extra arguments to the encoder.
        :returns: A :py:class:`bytes` object.
        """
        # may pass tuple instead of argument list
        if len(args) == 1 and isinstance(args[0], tuple):
            args = args[0]
        # Default: the raw encoder takes the mode string as its only argument.
        if encoder_name == "raw" and args == ():
            args = self.mode
        self.load()
        # Degenerate image: nothing to encode.
        if self.width == 0 or self.height == 0:
            return b""
        # unpack data
        e = _getencoder(self.mode, encoder_name, args)
        e.setimage(self.im)
        bufsize = max(65536, self.size[0] * 4)  # see RawEncode.c
        output = []
        while True:
            bytes_consumed, errcode, data = e.encode(bufsize)
            output.append(data)
            # Non-zero errcode ends the loop: positive means the encoder is
            # done, negative is a failure reported below.
            if errcode:
                break
        if errcode < 0:
            msg = f"encoder error {errcode} in tobytes"
            raise RuntimeError(msg)
        return b"".join(output)
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
msg = "not a bitmap"
raise ValueError(msg)
data = self.tobytes("xbm")
return b"".join(
[
f"#define {name}_width {self.size[0]}\n".encode("ascii"),
f"#define {name}_height {self.size[1]}\n".encode("ascii"),
f"static char {name}_bits[] = {{\n".encode("ascii"),
data,
b"};",
]
)
    def frombytes(self, data, decoder_name="raw", *args):
        """
        Loads this image with pixel data from a bytes object.
        This method is similar to the :py:func:`~PIL.Image.frombytes` function,
        but loads data into this image instead of creating a new image object.
        """
        # may pass tuple instead of argument list
        if len(args) == 1 and isinstance(args[0], tuple):
            args = args[0]
        # default format
        if decoder_name == "raw" and args == ():
            args = self.mode
        # unpack data
        d = _getdecoder(self.mode, decoder_name, args)
        d.setimage(self.im)
        s = d.decode(data)
        # s is (bytes_consumed, errcode): a non-negative consumed count means
        # the decoder stopped while still expecting more input.
        if s[0] >= 0:
            msg = "not enough image data"
            raise ValueError(msg)
        # Non-zero errcode: the decoder hit malformed data.
        if s[1] != 0:
            msg = "cannot decode image data"
            raise ValueError(msg)
    def load(self):
        """
        Allocates storage for the image and loads the pixel data. In
        normal cases, you don't need to call this method, since the
        Image class automatically loads an opened image when it is
        accessed for the first time.
        If the file associated with the image was opened by Pillow, then this
        method will close it. The exception to this is if the image has
        multiple frames, in which case the file will be left open for seek
        operations. See :ref:`file-handling` for more information.
        :returns: An image access object.
        :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
        """
        if self.im is not None and self.palette and self.palette.dirty:
            # realize palette: push pending palette edits down into the core
            # object before any pixel access happens.
            mode, arr = self.palette.getdata()
            self.im.putpalette(mode, arr)
            self.palette.dirty = 0
            self.palette.rawmode = None
            if "transparency" in self.info and mode in ("LA", "PA"):
                # Apply transparency to the palette's alpha channel: a single
                # index gets alpha 0, a bytes value gives per-index alphas.
                if isinstance(self.info["transparency"], int):
                    self.im.putpalettealpha(self.info["transparency"], 0)
                else:
                    self.im.putpalettealphas(self.info["transparency"])
                self.palette.mode = "RGBA"
            else:
                palette_mode = "RGBA" if mode.startswith("RGBA") else "RGB"
                self.palette.mode = palette_mode
                # Read the realized palette back so the Python-side copy
                # matches what the core object now holds.
                self.palette.palette = self.im.getpalette(palette_mode, palette_mode)
        if self.im is not None:
            if cffi and USE_CFFI_ACCESS:
                if self.pyaccess:
                    return self.pyaccess
                from . import PyAccess
                self.pyaccess = PyAccess.new(self, self.readonly)
                if self.pyaccess:
                    return self.pyaccess
            # Fallback: the core object's built-in pixel access.
            return self.im.pixel_access(self.readonly)
    def verify(self):
        """
        Verifies the contents of a file. For data read from a file, this
        method attempts to determine if the file is broken, without
        actually decoding the image data. If this method finds any
        problems, it raises suitable exceptions. If you need to load
        the image after using this method, you must reopen the image
        file.
        """
        # The base class has nothing to check; file-format plugins override
        # this with real validation.
        pass
    def convert(
        self, mode=None, matrix=None, dither=None, palette=Palette.WEB, colors=256
    ):
        """
        Returns a converted copy of this image. For the "P" mode, this
        method translates pixels through the palette. If mode is
        omitted, a mode is chosen so that all information in the image
        and the palette can be represented without a palette.
        The current version supports all possible conversions between
        "L", "RGB" and "CMYK". The ``matrix`` argument only supports "L"
        and "RGB".
        When translating a color image to greyscale (mode "L"),
        the library uses the ITU-R 601-2 luma transform::
            L = R * 299/1000 + G * 587/1000 + B * 114/1000
        The default method of converting a greyscale ("L") or "RGB"
        image into a bilevel (mode "1") image uses Floyd-Steinberg
        dither to approximate the original image luminosity levels. If
        dither is ``None``, all values larger than 127 are set to 255 (white),
        all other values to 0 (black). To use other thresholds, use the
        :py:meth:`~PIL.Image.Image.point` method.
        When converting from "RGBA" to "P" without a ``matrix`` argument,
        this passes the operation to :py:meth:`~PIL.Image.Image.quantize`,
        and ``dither`` and ``palette`` are ignored.
        When converting from "PA", if an "RGBA" palette is present, the alpha
        channel from the image will be used instead of the values from the palette.
        :param mode: The requested mode. See: :ref:`concept-modes`.
        :param matrix: An optional conversion matrix. If given, this
        should be 4- or 12-tuple containing floating point values.
        :param dither: Dithering method, used when converting from
        mode "RGB" to "P" or from "RGB" or "L" to "1".
        Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
        (default). Note that this is not used when ``matrix`` is supplied.
        :param palette: Palette to use when converting from mode "RGB"
        to "P". Available palettes are :data:`Palette.WEB` or
        :data:`Palette.ADAPTIVE`.
        :param colors: Number of colors to use for the :data:`Palette.ADAPTIVE`
        palette. Defaults to 256.
        :rtype: :py:class:`~PIL.Image.Image`
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        self.load()
        has_transparency = self.info.get("transparency") is not None
        if not mode and self.mode == "P":
            # determine default mode
            if self.palette:
                mode = self.palette.mode
            else:
                mode = "RGB"
            if mode == "RGB" and has_transparency:
                mode = "RGBA"
        if not mode or (mode == self.mode and not matrix):
            # Nothing to convert: hand back a plain copy.
            return self.copy()
        if matrix:
            # matrix conversion
            if mode not in ("L", "RGB"):
                msg = "illegal conversion"
                raise ValueError(msg)
            im = self.im.convert_matrix(mode, matrix)
            new = self._new(im)
            if has_transparency and self.im.bands == 3:
                # Push the transparency color through the same matrix so it
                # keeps matching the converted pixels.
                transparency = new.info["transparency"]
                def convert_transparency(m, v):
                    v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5
                    return max(0, min(255, int(v)))
                if mode == "L":
                    transparency = convert_transparency(matrix, transparency)
                elif len(mode) == 3:
                    transparency = tuple(
                        convert_transparency(matrix[i * 4 : i * 4 + 4], transparency)
                        for i in range(0, len(transparency))
                    )
                new.info["transparency"] = transparency
            return new
        if mode == "P" and self.mode == "RGBA":
            # dither and palette are ignored on this path (see docstring).
            return self.quantize(colors)
        trns = None
        delete_trns = False
        # transparency handling
        if has_transparency:
            if (self.mode in ("1", "L", "I") and mode in ("LA", "RGBA")) or (
                self.mode == "RGB" and mode == "RGBA"
            ):
                # Use transparent conversion to promote from transparent
                # color to an alpha channel.
                new_im = self._new(
                    self.im.convert_transparent(mode, self.info["transparency"])
                )
                del new_im.info["transparency"]
                return new_im
            elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
                t = self.info["transparency"]
                if isinstance(t, bytes):
                    # Dragons. This can't be represented by a single color
                    warnings.warn(
                        "Palette images with Transparency expressed in bytes should be "
                        "converted to RGBA images"
                    )
                    delete_trns = True
                else:
                    # get the new transparency color.
                    # use existing conversions
                    trns_im = Image()._new(core.new(self.mode, (1, 1)))
                    if self.mode == "P":
                        trns_im.putpalette(self.palette)
                        if isinstance(t, tuple):
                            err = "Couldn't allocate a palette color for transparency"
                            try:
                                t = trns_im.palette.getcolor(t, self)
                            except ValueError as e:
                                if str(e) == "cannot allocate more than 256 colors":
                                    # If all 256 colors are in use,
                                    # then there is no need for transparency
                                    t = None
                                else:
                                    raise ValueError(err) from e
                    if t is None:
                        trns = None
                    else:
                        trns_im.putpixel((0, 0), t)
                        if mode in ("L", "RGB"):
                            trns_im = trns_im.convert(mode)
                        else:
                            # can't just retrieve the palette number, got to do it
                            # after quantization.
                            trns_im = trns_im.convert("RGB")
                        trns = trns_im.getpixel((0, 0))
            elif self.mode == "P" and mode in ("LA", "PA", "RGBA"):
                t = self.info["transparency"]
                delete_trns = True
                if isinstance(t, bytes):
                    self.im.putpalettealphas(t)
                elif isinstance(t, int):
                    self.im.putpalettealpha(t, 0)
                else:
                    msg = "Transparency for P mode should be bytes or int"
                    raise ValueError(msg)
        if mode == "P" and palette == Palette.ADAPTIVE:
            # Adaptive palette: quantize rather than map to the web palette.
            im = self.im.quantize(colors)
            new = self._new(im)
            from . import ImagePalette
            new.palette = ImagePalette.ImagePalette("RGB", new.im.getpalette("RGB"))
            if delete_trns:
                # This could possibly happen if we requantize to fewer colors.
                # The transparency would be totally off in that case.
                del new.info["transparency"]
            if trns is not None:
                try:
                    new.info["transparency"] = new.palette.getcolor(trns, new)
                except Exception:
                    # if we can't make a transparent color, don't leave the old
                    # transparency hanging around to mess us up.
                    del new.info["transparency"]
                    warnings.warn("Couldn't allocate palette entry for transparency")
            return new
        if "LAB" in (self.mode, mode):
            # LAB conversions go through ICC color-management transforms.
            other_mode = mode if self.mode == "LAB" else self.mode
            if other_mode in ("RGB", "RGBA", "RGBX"):
                from . import ImageCms
                srgb = ImageCms.createProfile("sRGB")
                lab = ImageCms.createProfile("LAB")
                profiles = [lab, srgb] if self.mode == "LAB" else [srgb, lab]
                transform = ImageCms.buildTransform(
                    profiles[0], profiles[1], self.mode, mode
                )
                return transform.apply(self)
        # colorspace conversion
        if dither is None:
            dither = Dither.FLOYDSTEINBERG
        try:
            im = self.im.convert(mode, dither)
        except ValueError:
            try:
                # normalize source image and try again
                modebase = getmodebase(self.mode)
                if modebase == self.mode:
                    raise
                im = self.im.convert(modebase)
                im = im.convert(mode, dither)
            except KeyError as e:
                msg = "illegal conversion"
                raise ValueError(msg) from e
        new_im = self._new(im)
        if mode == "P" and palette != Palette.ADAPTIVE:
            from . import ImagePalette
            new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3)
        if delete_trns:
            # crash fail if we leave a bytes transparency in an rgb/l mode.
            del new_im.info["transparency"]
        if trns is not None:
            if new_im.mode == "P":
                try:
                    new_im.info["transparency"] = new_im.palette.getcolor(trns, new_im)
                except ValueError as e:
                    del new_im.info["transparency"]
                    if str(e) != "cannot allocate more than 256 colors":
                        # If all 256 colors are in use,
                        # then there is no need for transparency
                        warnings.warn(
                            "Couldn't allocate palette entry for transparency"
                        )
            else:
                new_im.info["transparency"] = trns
        return new_im
    def quantize(
        self,
        colors=256,
        method=None,
        kmeans=0,
        palette=None,
        dither=Dither.FLOYDSTEINBERG,
    ):
        """
        Convert the image to 'P' mode with the specified number
        of colors.
        :param colors: The desired number of colors, <= 256
        :param method: :data:`Quantize.MEDIANCUT` (median cut),
                       :data:`Quantize.MAXCOVERAGE` (maximum coverage),
                       :data:`Quantize.FASTOCTREE` (fast octree),
                       :data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support
                       using :py:func:`PIL.features.check_feature` with
                       ``feature="libimagequant"``).
                       By default, :data:`Quantize.MEDIANCUT` will be used.
                       The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`
                       and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so
                       :data:`Quantize.FASTOCTREE` is used by default instead.
        :param kmeans: Integer
        :param palette: Quantize to the palette of given
                        :py:class:`PIL.Image.Image`.
        :param dither: Dithering method, used when converting from
        mode "RGB" to "P" or from "RGB" or "L" to "1".
        Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`
        (default).
        :returns: A new image
        """
        self.load()
        if method is None:
            # defaults:
            method = Quantize.MEDIANCUT
            if self.mode == "RGBA":
                method = Quantize.FASTOCTREE
        if self.mode == "RGBA" and method not in (
            Quantize.FASTOCTREE,
            Quantize.LIBIMAGEQUANT,
        ):
            # Caller specified an invalid mode.
            msg = (
                "Fast Octree (method == 2) and libimagequant (method == 3) "
                "are the only valid methods for quantizing RGBA images"
            )
            raise ValueError(msg)
        if palette:
            # use palette from reference image
            palette.load()
            if palette.mode != "P":
                msg = "bad mode for palette image"
                raise ValueError(msg)
            if self.mode != "RGB" and self.mode != "L":
                msg = "only RGB or L mode images can be quantized to a palette"
                raise ValueError(msg)
            im = self.im.convert("P", dither, palette.im)
            new_im = self._new(im)
            new_im.palette = palette.palette.copy()
            return new_im
        im = self._new(self.im.quantize(colors, method, kmeans))
        from . import ImagePalette
        # Trim the palette to the number of colors actually requested.
        mode = im.im.getpalettemode()
        palette = im.im.getpalette(mode, mode)[: colors * len(mode)]
        im.palette = ImagePalette.ImagePalette(mode, palette)
        return im
    def copy(self):
        """
        Copies this image. Use this method if you wish to paste things
        into an image, but still retain the original.
        :rtype: :py:class:`~PIL.Image.Image`
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        self.load()
        return self._new(self.im.copy())
    # copy.copy() support: a shallow copy duplicates the pixel buffer too.
    __copy__ = copy
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`.
Note: Prior to Pillow 3.4.0, this was a lazy operation.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if box is None:
return self.copy()
if box[2] < box[0]:
msg = "Coordinate 'right' is less than 'left'"
raise ValueError(msg)
elif box[3] < box[1]:
msg = "Coordinate 'lower' is less than 'upper'"
raise ValueError(msg)
self.load()
return self._new(self._crop(self.im, box))
def _crop(self, im, box):
"""
Returns a rectangular region from the core image object im.
This is equivalent to calling im.crop((x0, y0, x1, y1)), but
includes additional sanity checks.
:param im: a core image object
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:returns: A core image object.
"""
x0, y0, x1, y1 = map(int, map(round, box))
absolute_values = (abs(x1 - x0), abs(y1 - y0))
_decompression_bomb_check(absolute_values)
return im.crop((x0, y0, x1, y1))
    def draft(self, mode, size):
        """
        Configures the image file loader so it returns a version of the
        image that as closely as possible matches the given mode and
        size. For example, you can use this method to convert a color
        JPEG to greyscale while loading it.
        If any changes are made, returns a tuple with the chosen ``mode`` and
        ``box`` with coordinates of the original image within the altered one.
        Note that this method modifies the :py:class:`~PIL.Image.Image` object
        in place. If the image has already been loaded, this method has no
        effect.
        Note: This method is not implemented for most images. It is
        currently implemented only for JPEG and MPO images.
        :param mode: The requested mode.
        :param size: The requested size in pixels, as a 2-tuple:
        (width, height).
        """
        # No-op in the base class; the JPEG/MPO plugins override this.
        pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object."""
from . import ImageFilter
self.load()
if isinstance(filter, Callable):
filter = filter()
if not hasattr(filter, "filter"):
msg = "filter argument should be ImageFilter.Filter instance or class"
raise TypeError(msg)
multiband = isinstance(filter, ImageFilter.MultibandFilter)
if self.im.bands == 1 or multiband:
return self._new(filter.filter(self.im))
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
    def getbands(self):
        """
        Returns a tuple containing the name of each band in this image.
        For example, ``getbands`` on an RGB image returns ("R", "G", "B").
        :returns: A tuple containing band names.
        :rtype: tuple
        """
        # Band names are derived from the mode descriptor, not the pixel data.
        return ImageMode.getmode(self.mode).bands
    def getbbox(self):
        """
        Calculates the bounding box of the non-zero regions in the
        image.
        :returns: The bounding box is returned as a 4-tuple defining the
           left, upper, right, and lower pixel coordinate. See
           :ref:`coordinate-system`. If the image is completely empty, this
           method returns None.
        """
        self.load()
        return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
The colors will be in the image's mode. For example, an RGB image will
return a tuple of (red, green, blue) color values, and a P image will
return the index of the color in the palette.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
    def getdata(self, band=None):
        """
        Returns the contents of this image as a sequence object
        containing pixel values. The sequence object is flattened, so
        that values for line one follow directly after the values of
        line zero, and so on.
        Note that the sequence object returned by this method is an
        internal PIL data type, which only supports certain sequence
        operations. To convert it to an ordinary sequence (e.g. for
        printing), use ``list(im.getdata())``.
        :param band: What band to return. The default is to return
           all bands. To return a single band, pass in the index
           value (e.g. 0 to get the "R" band from an "RGB" image).
        :returns: A sequence-like object.
        """
        self.load()
        if band is not None:
            return self.im.getband(band)
        return self.im  # could be abused
def getextrema(self):
"""
Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
    def _getxmp(self, xmp_tags):
        # Parse an XMP XML packet into nested dicts keyed by tag local-names.
        def get_name(tag):
            # Strip the "{namespace}" prefix from a qualified tag name.
            return tag.split("}")[1]
        def get_value(element):
            # Recursively fold an element into a dict of attributes plus
            # child values; leaf elements collapse to their text.
            value = {get_name(k): v for k, v in element.attrib.items()}
            children = list(element)
            if children:
                for child in children:
                    name = get_name(child.tag)
                    child_value = get_value(child)
                    if name in value:
                        # Repeated tags accumulate into a list.
                        if not isinstance(value[name], list):
                            value[name] = [value[name]]
                        value[name].append(child_value)
                    else:
                        value[name] = child_value
            elif value:
                if element.text:
                    value["text"] = element.text
            else:
                return element.text
            return value
        if ElementTree is None:
            # defusedxml is an optional dependency guarding XML parsing.
            warnings.warn("XMP data cannot be read without defusedxml dependency")
            return {}
        else:
            root = ElementTree.fromstring(xmp_tags)
            return {get_name(root.tag): get_value(root)}
    def getexif(self):
        """
        Gets EXIF data from the image.
        :returns: an :py:class:`~PIL.Image.Exif` object.
        """
        # Cache the Exif object; only parse once per image.
        if self._exif is None:
            self._exif = Exif()
            self._exif._loaded = False
        elif self._exif._loaded:
            return self._exif
        self._exif._loaded = True
        exif_info = self.info.get("exif")
        if exif_info is None:
            # Fall back to alternative EXIF carriers: a PNG raw profile
            # (hex-encoded) or TIFF tag data read straight from the file.
            if "Raw profile type exif" in self.info:
                exif_info = bytes.fromhex(
                    "".join(self.info["Raw profile type exif"].split("\n")[3:])
                )
            elif hasattr(self, "tag_v2"):
                self._exif.bigtiff = self.tag_v2._bigtiff
                self._exif.endian = self.tag_v2._endian
                self._exif.load_from_fp(self.fp, self.tag_v2._offset)
        if exif_info is not None:
            self._exif.load(exif_info)
        # XMP tags
        if 0x0112 not in self._exif:
            # Orientation (tag 0x0112) missing: try to recover it from XMP.
            xmp_tags = self.info.get("XML:com.adobe.xmp")
            if xmp_tags:
                match = re.search(r'tiff:Orientation(="|>)([0-9])', xmp_tags)
                if match:
                    self._exif[0x0112] = int(match[2])
        return self._exif
def _reload_exif(self):
if self._exif is None or not self._exif._loaded:
return
self._exif._loaded = False
self.getexif()
    def get_child_images(self):
        # Collect sub-images (SubIFDs and the IFD1 thumbnail) embedded in
        # this file's EXIF data, opening each as its own Image.
        child_images = []
        exif = self.getexif()
        ifds = []
        if ExifTags.Base.SubIFDs in exif:
            subifd_offsets = exif[ExifTags.Base.SubIFDs]
            if subifd_offsets:
                # A single offset may be stored bare rather than as a tuple.
                if not isinstance(subifd_offsets, tuple):
                    subifd_offsets = (subifd_offsets,)
                for subifd_offset in subifd_offsets:
                    ifds.append((exif._get_ifd_dict(subifd_offset), subifd_offset))
        ifd1 = exif.get_ifd(ExifTags.IFD.IFD1)
        # Tag 513 = JPEGInterchangeFormat (thumbnail offset).
        if ifd1 and ifd1.get(513):
            ifds.append((ifd1, exif._info.next))
        offset = None
        for ifd, ifd_offset in ifds:
            # Remember where the file pointer was so it can be restored
            # after all children are read.
            current_offset = self.fp.tell()
            if offset is None:
                offset = current_offset
            fp = self.fp
            thumbnail_offset = ifd.get(513)
            if thumbnail_offset is not None:
                try:
                    thumbnail_offset += self._exif_offset
                except AttributeError:
                    pass
                # Tag 514 = JPEGInterchangeFormatLength (thumbnail size).
                self.fp.seek(thumbnail_offset)
                data = self.fp.read(ifd.get(514))
                fp = io.BytesIO(data)
            with open(fp) as im:
                if thumbnail_offset is None:
                    # Not a self-contained thumbnail: point the child at the
                    # IFD inside the parent file and seek to it.
                    im._frame_pos = [ifd_offset]
                    im._seek(0)
                im.load()
                child_images.append(im)
        if offset is not None:
            # Restore the parent's file position.
            self.fp.seek(offset)
        return child_images
    def getim(self):
        """
        Returns a capsule that points to the internal image memory.
        :returns: A capsule object.
        """
        self.load()
        return self.im.ptr
def getpalette(self, rawmode="RGB"):
"""
Returns the image palette as a list.
:param rawmode: The mode in which to return the palette. ``None`` will
return the palette in its current mode.
.. versionadded:: 9.1.0
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
mode = self.im.getpalettemode()
except ValueError:
return None # no palette
if rawmode is None:
rawmode = mode
return list(self.im.getpalette(mode, rawmode))
    def apply_transparency(self):
        """
        If a P mode image has a "transparency" key in the info dictionary,
        remove the key and instead apply the transparency to the palette.
        Otherwise, the image is unchanged.
        """
        if self.mode != "P" or "transparency" not in self.info:
            return
        from . import ImagePalette
        # Work on the RGBA form of the palette so alpha can be edited.
        palette = self.getpalette("RGBA")
        transparency = self.info["transparency"]
        if isinstance(transparency, bytes):
            # bytes: one alpha value per palette index.
            for i, alpha in enumerate(transparency):
                palette[i * 4 + 3] = alpha
        else:
            # int: a single fully-transparent palette index.
            palette[transparency * 4 + 3] = 0
        self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette))
        # Mark dirty so load() pushes the new palette into the core object.
        self.palette.dirty = 1
        del self.info["transparency"]
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y). See
:ref:`coordinate-system`.
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return list(x), list(y)
    def histogram(self, mask=None, extrema=None):
        """
        Returns a histogram for the image. The histogram is returned as a
        list of pixel counts, one for each pixel value in the source
        image. Counts are grouped into 256 bins for each band, even if
        the image has more than 8 bits per band. If the image has more
        than one band, the histograms for all bands are concatenated (for
        example, the histogram for an "RGB" image contains 768 values).
        A bilevel image (mode "1") is treated as a greyscale ("L") image
        by this method.
        If a mask is provided, the method returns a histogram for those
        parts of the image where the mask image is non-zero. The mask
        image must have the same size as the image, and be either a
        bi-level image (mode "1") or a greyscale image ("L").
        :param mask: An optional mask.
        :param extrema: An optional tuple of manually-specified extrema.
        :returns: A list containing pixel counts.
        """
        self.load()
        if mask:
            # Count only pixels where the mask is non-zero.
            mask.load()
            return self.im.histogram((0, 0), mask.im)
        if self.mode in ("I", "F"):
            # Wide modes need explicit extrema to bin values into 256 slots.
            if extrema is None:
                extrema = self.getextrema()
            return self.im.histogram(extrema)
        return self.im.histogram()
def entropy(self, mask=None, extrema=None):
"""
Calculates and returns the entropy for the image.
A bilevel image (mode "1") is treated as a greyscale ("L")
image by this method.
If a mask is provided, the method employs the histogram for
those parts of the image where the mask image is non-zero.
The mask image must have the same size as the image, and be
either a bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:param extrema: An optional tuple of manually-specified extrema.
:returns: A float value representing the image entropy
"""
self.load()
if mask:
mask.load()
return self.im.entropy((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.entropy(extrema)
return self.im.entropy()
    def paste(self, im, box=None, mask=None):
        """
        Pastes another image into this image. The box argument is either
        a 2-tuple giving the upper left corner, a 4-tuple defining the
        left, upper, right, and lower pixel coordinate, or None (same as
        (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
        of the pasted image must match the size of the region.
        If the modes don't match, the pasted image is converted to the mode of
        this image (see the :py:meth:`~PIL.Image.Image.convert` method for
        details).
        Instead of an image, the source can be a integer or tuple
        containing pixel values. The method then fills the region
        with the given color. When creating RGB images, you can
        also use color strings as supported by the ImageColor module.
        If a mask is given, this method updates only the regions
        indicated by the mask. You can use either "1", "L", "LA", "RGBA"
        or "RGBa" images (if present, the alpha band is used as mask).
        Where the mask is 255, the given image is copied as is. Where
        the mask is 0, the current value is preserved. Intermediate
        values will mix the two images together, including their alpha
        channels if they have them.
        See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
        combine images with respect to their alpha channels.
        :param im: Source image or pixel value (integer or tuple).
        :param box: An optional 4-tuple giving the region to paste into.
           If a 2-tuple is used instead, it's treated as the upper left
           corner. If omitted or None, the source is pasted into the
           upper left corner.
           If an image is given as the second argument and there is no
           third, the box defaults to (0, 0), and the second argument
           is interpreted as a mask image.
        :param mask: An optional mask image.
        """
        if isImageType(box) and mask is None:
            # abbreviated paste(im, mask) syntax
            mask = box
            box = None
        if box is None:
            box = (0, 0)
        if len(box) == 2:
            # upper left corner given; get size from image or mask
            if isImageType(im):
                size = im.size
            elif isImageType(mask):
                size = mask.size
            else:
                # FIXME: use self.size here?
                msg = "cannot determine region size; use 4-item box"
                raise ValueError(msg)
            # Extend the 2-tuple corner into a full 4-tuple box.
            box += (box[0] + size[0], box[1] + size[1])
        if isinstance(im, str):
            # A color string: resolve it in this image's mode.
            from . import ImageColor
            im = ImageColor.getcolor(im, self.mode)
        elif isImageType(im):
            im.load()
            if self.mode != im.mode:
                if self.mode != "RGB" or im.mode not in ("LA", "RGBA", "RGBa"):
                    # should use an adapter for this!
                    im = im.convert(self.mode)
            # Hand the core object to the core paste below.
            im = im.im
        # Pasting mutates pixels: make sure we own the buffer first.
        self._ensure_mutable()
        if mask:
            mask.load()
            self.im.paste(im, box, mask.im)
        else:
            self.im.paste(im, box)
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
    """'In-place' analog of Image.alpha_composite. Composites an image
    onto this image.
    :param im: image to composite over this one
    :param dest: Optional 2 tuple (left, top) specifying the upper
      left corner in this (destination) image.
    :param source: Optional 2 (left, top) tuple for the upper left
      corner in the overlay source image, or 4 tuple (left, top, right,
      bottom) for the bounds of the source rectangle
    Performance Note: Not currently implemented in-place in the core layer.
    """
    # Validate argument shapes up front so failures happen before any
    # cropping or pasting is done.
    if not isinstance(source, (list, tuple)):
        msg = "Source must be a tuple"
        raise ValueError(msg)
    if not isinstance(dest, (list, tuple)):
        msg = "Destination must be a tuple"
        raise ValueError(msg)
    if not len(source) in (2, 4):
        msg = "Source must be a 2 or 4-tuple"
        raise ValueError(msg)
    if not len(dest) == 2:
        msg = "Destination must be a 2-tuple"
        raise ValueError(msg)
    if min(source) < 0:
        msg = "Source must be non-negative"
        raise ValueError(msg)
    # A 2-tuple source means "from (left, top) to the end of the image".
    if len(source) == 2:
        source = source + im.size
    # over image, crop if it's not the whole thing.
    if source == (0, 0) + im.size:
        overlay = im
    else:
        overlay = im.crop(source)
    # target for the paste
    box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)
    # destination image. don't copy if we're using the whole image.
    if box == (0, 0) + self.size:
        background = self
    else:
        background = self.crop(box)
    # Composite out-of-place, then paste the result back into self
    # (the core layer has no true in-place alpha_composite).
    result = alpha_composite(background, overlay)
    self.paste(result, box)
def point(self, lut, mode=None):
    """
    Maps this image through a lookup table or function.
    :param lut: A lookup table, containing 256 (or 65536 if
       self.mode=="I" and mode == "L") values per band in the
       image. A function can be used instead, it should take a
       single argument. The function is called once for each
       possible pixel value, and the resulting table is applied to
       all bands of the image.
       It may also be an :py:class:`~PIL.Image.ImagePointHandler`
       object::
           class Example(Image.ImagePointHandler):
             def point(self, data):
               # Return result
    :param mode: Output mode (default is same as input). In the
       current version, this can only be used if the source image
       has mode "L" or "P", and the output has mode "1" or the
       source image mode is "I" and the output mode is "L".
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    self.load()
    if isinstance(lut, ImagePointHandler):
        # Delegate entirely to the handler object.
        return lut.point(self)
    if callable(lut):
        # if it isn't a list, it should be a function
        if self.mode in ("I", "I;16", "F"):
            # check if the function can be used with point_transform
            # UNDONE wiredfool -- I think this prevents us from ever doing
            # a gamma function point transform on > 8bit images.
            scale, offset = _getscaleoffset(lut)
            return self._new(self.im.point_transform(scale, offset))
        # for other modes, convert the function to a table
        # (256 entries per band, replicated for every band).
        lut = [lut(i) for i in range(256)] * self.im.bands
    if self.mode == "F":
        # FIXME: _imaging returns a confusing error message for this case
        msg = "point operation not supported for this mode"
        raise ValueError(msg)
    if mode != "F":
        # Integer output modes need an integer table.
        lut = [round(i) for i in lut]
    return self._new(self.im.point(lut, mode))
def putalpha(self, alpha):
    """
    Adds or replaces the alpha layer in this image. If the image
    does not have an alpha layer, it's converted to "LA" or "RGBA".
    The new layer must be either "L" or "1".
    :param alpha: The new alpha layer. This can either be an "L" or "1"
        image having the same size as this image, or an integer or
        other color value.
    """
    self._ensure_mutable()
    if self.mode not in ("LA", "PA", "RGBA"):
        # attempt to promote self to a matching alpha mode
        try:
            mode = getmodebase(self.mode) + "A"
            try:
                # Fast path: flip the core image's mode in place.
                self.im.setmode(mode)
            except (AttributeError, ValueError) as e:
                # do things the hard way
                im = self.im.convert(mode)
                if im.mode not in ("LA", "PA", "RGBA"):
                    raise ValueError from e  # sanity check
                self.im = im
                self.pyaccess = None
            self.mode = self.im.mode
        except KeyError as e:
            # getmodebase raised: the current mode has no alpha variant.
            msg = "illegal image mode"
            raise ValueError(msg) from e
    # Alpha is band 1 for two-band modes, band 3 for RGBA.
    if self.mode in ("LA", "PA"):
        band = 1
    else:
        band = 3
    if isImageType(alpha):
        # alpha layer
        if alpha.mode not in ("1", "L"):
            msg = "illegal image mode"
            raise ValueError(msg)
        alpha.load()
        if alpha.mode == "1":
            alpha = alpha.convert("L")
    else:
        # constant alpha
        try:
            self.im.fillband(band, alpha)
        except (AttributeError, ValueError):
            # do things the hard way: build a constant "L" image and
            # fall through to putband below.
            alpha = new("L", self.size, alpha)
        else:
            return
    self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
    """
    Copies pixel data from a flattened sequence object into the image. The
    values should start at the upper left corner (0, 0), continue to the
    end of the line, followed directly by the first value of the second
    line, and so on. Data will be read until either the image or the
    sequence ends. The scale and offset values are used to adjust the
    sequence values: **pixel = value*scale + offset**.
    :param data: A flattened sequence object.
    :param scale: An optional scale value. The default is 1.0.
    :param offset: An optional offset value. The default is 0.0.
    """
    # Writing pixels needs a private, writable core image.
    self._ensure_mutable()
    self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
    """
    Attaches a palette to this image. The image must be a "P", "PA", "L"
    or "LA" image.
    The palette sequence must contain at most 256 colors, made up of one
    integer value for each channel in the raw mode.
    For example, if the raw mode is "RGB", then it can contain at most 768
    values, made up of red, green and blue values for the corresponding pixel
    index in the 256 colors.
    If the raw mode is "RGBA", then it can contain at most 1024 values,
    containing red, green, blue and alpha values.
    Alternatively, an 8-bit string may be used instead of an integer sequence.
    :param data: A palette sequence (either a list or a string).
    :param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a mode
       that can be transformed to "RGB" or "RGBA" (e.g. "R", "BGR;15", "RGBA;L").
    """
    from . import ImagePalette
    if self.mode not in ("L", "LA", "P", "PA"):
        msg = "illegal image mode"
        raise ValueError(msg)
    if isinstance(data, ImagePalette.ImagePalette):
        # Re-wrap so we hold our own raw palette object.
        palette = ImagePalette.raw(data.rawmode, data.palette)
    else:
        if not isinstance(data, bytes):
            data = bytes(data)
        palette = ImagePalette.raw(rawmode, data)
    # Attaching a palette turns L/LA into P/PA (alpha is preserved).
    self.mode = "PA" if "A" in self.mode else "P"
    self.palette = palette
    self.palette.mode = "RGB"
    self.load()  # install new palette
def putpixel(self, xy, value):
    """
    Modifies the pixel at the given position. The color is given as
    a single numerical value for single-band images, and a tuple for
    multi-band images. In addition to this, RGB and RGBA tuples are
    accepted for P and PA images.
    Note that this method is relatively slow. For more extensive changes,
    use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
    module instead.
    See:
    * :py:meth:`~PIL.Image.Image.paste`
    * :py:meth:`~PIL.Image.Image.putdata`
    * :py:mod:`~PIL.ImageDraw`
    :param xy: The pixel coordinate, given as (x, y). See
       :ref:`coordinate-system`.
    :param value: The pixel value.
    """
    # Copy-on-write: never mutate a shared/read-only buffer.
    if self.readonly:
        self._copy()
    self.load()
    if self.pyaccess:
        # Fast pure-Python pixel access path, if available.
        return self.pyaccess.putpixel(xy, value)
    if (
        self.mode in ("P", "PA")
        and isinstance(value, (list, tuple))
        and len(value) in [3, 4]
    ):
        # RGB or RGBA value for a P or PA image: map the color to a
        # palette index (allocating a new entry if needed).
        if self.mode == "PA":
            alpha = value[3] if len(value) == 4 else 255
            value = value[:3]
        value = self.palette.getcolor(value, self)
        if self.mode == "PA":
            value = (value, alpha)
    return self.im.putpixel(xy, value)
def remap_palette(self, dest_map, source_palette=None):
    """
    Rewrites the image to reorder the palette.
    :param dest_map: A list of indexes into the original palette.
       e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``
       is the identity transform.
    :param source_palette: Bytes or None.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    from . import ImagePalette
    if self.mode not in ("L", "P"):
        msg = "illegal image mode"
        raise ValueError(msg)
    bands = 3
    palette_mode = "RGB"
    if source_palette is None:
        if self.mode == "P":
            self.load()
            palette_mode = self.im.getpalettemode()
            if palette_mode == "RGBA":
                bands = 4
            source_palette = self.im.getpalette(palette_mode, palette_mode)
        else:  # L-mode
            # Synthesize a greyscale palette: entry i is (i, i, i).
            source_palette = bytearray(i // 3 for i in range(768))
    palette_bytes = b""
    new_positions = [0] * 256
    # pick only the used colors from the palette
    for i, oldPosition in enumerate(dest_map):
        palette_bytes += source_palette[
            oldPosition * bands : oldPosition * bands + bands
        ]
        new_positions[oldPosition] = i
    # replace the palette color id of all pixel with the new id
    # Palette images are [0..255], mapped through a 1 or 3
    # byte/color map.  We need to remap the whole image
    # from palette 1 to palette 2. New_positions is
    # an array of indexes into palette 1.  Palette 2 is
    # palette 1 with any holes removed.
    # We're going to leverage the convert mechanism to use the
    # C code to remap the image from palette 1 to palette 2,
    # by forcing the source image into 'L' mode and adding a
    # mapping 'L' mode palette, then converting back to 'L'
    # sans palette thus converting the image bytes, then
    # assigning the optimized RGB palette.
    # perf reference, 9500x4000 gif, w/~135 colors
    # 14 sec prepatch, 1 sec postpatch with optimization forced.
    mapping_palette = bytearray(new_positions)
    m_im = self.copy()
    m_im.mode = "P"
    m_im.palette = ImagePalette.ImagePalette(
        palette_mode, palette=mapping_palette * bands
    )
    # possibly set palette dirty, then
    # m_im.putpalette(mapping_palette, 'L')  # converts to 'P'
    # or just force it.
    # UNDONE -- this is part of the general issue with palettes
    m_im.im.putpalette(palette_mode + ";L", m_im.palette.tobytes())
    m_im = m_im.convert("L")
    m_im.putpalette(palette_bytes, palette_mode)
    m_im.palette = ImagePalette.ImagePalette(palette_mode, palette=palette_bytes)
    if "transparency" in self.info:
        # Remap the transparency index too; drop it if the transparent
        # color was not carried over by dest_map.
        try:
            m_im.info["transparency"] = dest_map.index(self.info["transparency"])
        except ValueError:
            if "transparency" in m_im.info:
                del m_im.info["transparency"]
    return m_im
def _get_safe_box(self, size, resample, box):
    """Expands the box so it includes adjacent pixels
    that may be used by resampling with the given resampling filter.
    """
    # Filter support in source pixels, per axis, at the current scale.
    support = _filters_support[resample] - 0.5
    margin_x = support * (box[2] - box[0]) / size[0]
    margin_y = support * (box[3] - box[1]) / size[1]
    # Grow the box by the margins, clamped to the image bounds.
    left = max(0, int(box[0] - margin_x))
    upper = max(0, int(box[1] - margin_y))
    right = min(self.size[0], math.ceil(box[2] + margin_x))
    lower = min(self.size[1], math.ceil(box[3] + margin_y))
    return (left, upper, right, lower)
def resize(self, size, resample=None, box=None, reducing_gap=None):
    """
    Returns a resized copy of this image.
    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param resample: An optional resampling filter.  This can be
       one of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
       :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
       :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
       If the image has mode "1" or "P", it is always set to
       :py:data:`Resampling.NEAREST`. If the image mode specifies a number
       of bits, such as "I;16", then the default filter is
       :py:data:`Resampling.NEAREST`. Otherwise, the default filter is
       :py:data:`Resampling.BICUBIC`. See: :ref:`concept-filters`.
    :param box: An optional 4-tuple of floats providing
       the source image region to be scaled.
       The values must be within (0, 0, width, height) rectangle.
       If omitted or None, the entire source is used.
    :param reducing_gap: Apply optimization by resizing the image
       in two steps. First, reducing the image by integer times
       using :py:meth:`~PIL.Image.Image.reduce`.
       Second, resizing using regular resampling. The last step
       changes size no less than by ``reducing_gap`` times.
       ``reducing_gap`` may be None (no first step is performed)
       or should be greater than 1.0. The bigger ``reducing_gap``,
       the closer the result to the fair resampling.
       The smaller ``reducing_gap``, the faster resizing.
       With ``reducing_gap`` greater or equal to 3.0, the result is
       indistinguishable from fair resampling in most cases.
       The default value is None (no optimization).
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if resample is None:
        # Bit-depth-qualified modes such as "I;16" only support NEAREST.
        type_special = ";" in self.mode
        resample = Resampling.NEAREST if type_special else Resampling.BICUBIC
    elif resample not in (
        Resampling.NEAREST,
        Resampling.BILINEAR,
        Resampling.BICUBIC,
        Resampling.LANCZOS,
        Resampling.BOX,
        Resampling.HAMMING,
    ):
        # Build a helpful error listing every valid filter.
        msg = f"Unknown resampling filter ({resample})."
        filters = [
            f"{filter[1]} ({filter[0]})"
            for filter in (
                (Resampling.NEAREST, "Image.Resampling.NEAREST"),
                (Resampling.LANCZOS, "Image.Resampling.LANCZOS"),
                (Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
                (Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
                (Resampling.BOX, "Image.Resampling.BOX"),
                (Resampling.HAMMING, "Image.Resampling.HAMMING"),
            )
        ]
        msg += " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
        raise ValueError(msg)
    if reducing_gap is not None and reducing_gap < 1.0:
        msg = "reducing_gap must be 1.0 or greater"
        raise ValueError(msg)
    size = tuple(size)
    self.load()
    if box is None:
        box = (0, 0) + self.size
    else:
        box = tuple(box)
    # No-op resize: return a plain copy.
    if self.size == size and box == (0, 0) + self.size:
        return self.copy()
    if self.mode in ("1", "P"):
        resample = Resampling.NEAREST
    if self.mode in ["LA", "RGBA"] and resample != Resampling.NEAREST:
        # Resample in premultiplied alpha to avoid halo artifacts,
        # then convert back.
        im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
        im = im.resize(size, resample, box)
        return im.convert(self.mode)
    self.load()
    if reducing_gap is not None and resample != Resampling.NEAREST:
        # Two-step optimization: integer-reduce first, leaving at least
        # reducing_gap times the target size for the final resample.
        factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1
        factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1
        if factor_x > 1 or factor_y > 1:
            reduce_box = self._get_safe_box(size, resample, box)
            factor = (factor_x, factor_y)
            # Some plugins replace `reduce` with a non-callable attribute;
            # fall back to the base implementation in that case.
            if callable(self.reduce):
                self = self.reduce(factor, box=reduce_box)
            else:
                self = Image.reduce(self, factor, box=reduce_box)
            # Rescale the crop box into the reduced image's coordinates.
            box = (
                (box[0] - reduce_box[0]) / factor_x,
                (box[1] - reduce_box[1]) / factor_y,
                (box[2] - reduce_box[0]) / factor_x,
                (box[3] - reduce_box[1]) / factor_y,
            )
    return self._new(self.im.resize(size, resample, box))
def reduce(self, factor, box=None):
    """
    Returns a copy of the image reduced ``factor`` times.
    If the size of the image is not dividable by ``factor``,
    the resulting size will be rounded up.
    :param factor: A greater than 0 integer or tuple of two integers
       for width and height separately.
    :param box: An optional 4-tuple of ints providing
       the source image region to be reduced.
       The values must be within ``(0, 0, width, height)`` rectangle.
       If omitted or ``None``, the entire source is used.
    """
    # Normalize a scalar factor to a (width, height) pair.
    if not isinstance(factor, (list, tuple)):
        factor = (factor, factor)
    box = (0, 0) + self.size if box is None else tuple(box)
    # Identity reduction over the whole image is just a copy.
    if factor == (1, 1) and box == (0, 0) + self.size:
        return self.copy()
    if self.mode in ["LA", "RGBA"]:
        # Reduce in premultiplied alpha, then convert back.
        premultiplied = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
        return premultiplied.reduce(factor, box).convert(self.mode)
    self.load()
    return self._new(self.im.reduce(factor, box))
def rotate(
    self,
    angle,
    resample=Resampling.NEAREST,
    expand=0,
    center=None,
    translate=None,
    fillcolor=None,
):
    """
    Returns a rotated copy of this image.  This method returns a
    copy of this image, rotated the given number of degrees counter
    clockwise around its centre.
    :param angle: In degrees counter clockwise.
    :param resample: An optional resampling filter.  This can be
       one of :py:data:`Resampling.NEAREST` (use nearest neighbour),
       :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
       environment), or :py:data:`Resampling.BICUBIC` (cubic spline
       interpolation in a 4x4 environment). If omitted, or if the image has
       mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
       See :ref:`concept-filters`.
    :param expand: Optional expansion flag.  If true, expands the output
       image to make it large enough to hold the entire rotated image.
       If false or omitted, make the output image the same size as the
       input image.  Note that the expand flag assumes rotation around
       the center and no translation.
    :param center: Optional center of rotation (a 2-tuple).  Origin is
       the upper left corner.  Default is the center of the image.
    :param translate: An optional post-rotate translation (a 2-tuple).
    :param fillcolor: An optional color for area outside the rotated image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    angle = angle % 360.0
    # Fast paths regardless of filter, as long as we're not
    # translating or changing the center.
    if not (center or translate):
        if angle == 0:
            return self.copy()
        if angle == 180:
            return self.transpose(Transpose.ROTATE_180)
        if angle in (90, 270) and (expand or self.width == self.height):
            return self.transpose(
                Transpose.ROTATE_90 if angle == 90 else Transpose.ROTATE_270
            )
    # Calculate the affine matrix.  Note that this is the reverse
    # transformation (from destination image to source) because we
    # want to interpolate the (discrete) destination pixel from
    # the local area around the (floating) source pixel.
    # The matrix we actually want (note that it operates from the right):
    # (1, 0, tx)   (1, 0, cx)   ( cos a, sin a, 0)   (1, 0, -cx)
    # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy)
    # (0, 0,  1)   (0, 0,  1)   (     0,     0, 1)   (0, 0,   1)
    # The reverse matrix is thus:
    # (1, 0, cx)   ( cos -a, sin -a, 0)   (1, 0, -cx)   (1, 0, -tx)
    # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty)
    # (0, 0,  1)   (      0,      0, 1)   (0, 0,   1)   (0, 0,   1)
    # In any case, the final translation may be updated at the end to
    # compensate for the expand flag.
    w, h = self.size
    if translate is None:
        post_trans = (0, 0)
    else:
        post_trans = translate
    if center is None:
        # FIXME These should be rounded to ints?
        rotn_center = (w / 2.0, h / 2.0)
    else:
        rotn_center = center
    # Negative because we build the inverse (destination -> source) map.
    angle = -math.radians(angle)
    # Rounding to 15 decimals keeps exact values for right angles.
    matrix = [
        round(math.cos(angle), 15),
        round(math.sin(angle), 15),
        0.0,
        round(-math.sin(angle), 15),
        round(math.cos(angle), 15),
        0.0,
    ]
    def transform(x, y, matrix):
        (a, b, c, d, e, f) = matrix
        return a * x + b * y + c, d * x + e * y + f
    # Fold the rotation center and post-translation into the matrix.
    matrix[2], matrix[5] = transform(
        -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
    )
    matrix[2] += rotn_center[0]
    matrix[5] += rotn_center[1]
    if expand:
        # calculate output size: transform all four corners and take
        # the bounding box.
        xx = []
        yy = []
        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
            x, y = transform(x, y, matrix)
            xx.append(x)
            yy.append(y)
        nw = math.ceil(max(xx)) - math.floor(min(xx))
        nh = math.ceil(max(yy)) - math.floor(min(yy))
        # We multiply a translation matrix from the right.  Because of its
        # special form, this is the same as taking the image of the
        # translation vector as new translation vector.
        matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix)
        w, h = nw, nh
    return self.transform(
        (w, h), Transform.AFFINE, matrix, resample, fillcolor=fillcolor
    )
def save(self, fp, format=None, **params):
    """
    Saves this image under the given filename.  If no format is
    specified, the format to use is determined from the filename
    extension, if possible.
    Keyword options can be used to provide additional instructions
    to the writer. If a writer doesn't recognise an option, it is
    silently ignored. The available options are described in the
    :doc:`image format documentation
    <../handbook/image-file-formats>` for each writer.
    You can use a file object instead of a filename. In this case,
    you must always specify the format. The file object must
    implement the ``seek``, ``tell``, and ``write``
    methods, and be opened in binary mode.
    :param fp: A filename (string), pathlib.Path object or file object.
    :param format: Optional format override.  If omitted, the
       format to use is determined from the filename extension.
       If a file object was used instead of a filename, this
       parameter should always be used.
    :param params: Extra parameters to the image writer.
    :returns: None
    :exception ValueError: If the output format could not be determined
       from the file name.  Use the format option to solve this.
    :exception OSError: If the file could not be written.  The file
       may have been created, and may contain partial data.
    """
    filename = ""
    open_fp = False
    # Resolve fp into a filename we must open ourselves, or a ready
    # binary stream supplied by the caller.
    if isinstance(fp, Path):
        filename = str(fp)
        open_fp = True
    elif is_path(fp):
        filename = fp
        open_fp = True
    elif fp == sys.stdout:
        try:
            fp = sys.stdout.buffer
        except AttributeError:
            pass
    if not filename and hasattr(fp, "name") and is_path(fp.name):
        # only set the name for metadata purposes
        filename = fp.name
    # may mutate self!
    self._ensure_mutable()
    save_all = params.pop("save_all", False)
    # Writers read their options from these attributes.
    self.encoderinfo = params
    self.encoderconfig = ()
    preinit()
    ext = os.path.splitext(filename)[1].lower()
    if not format:
        if ext not in EXTENSION:
            # Load all plugins before giving up on the extension.
            init()
        try:
            format = EXTENSION[ext]
        except KeyError as e:
            msg = f"unknown file extension: {ext}"
            raise ValueError(msg) from e
    if format.upper() not in SAVE:
        init()
    if save_all:
        save_handler = SAVE_ALL[format.upper()]
    else:
        save_handler = SAVE[format.upper()]
    created = False
    if open_fp:
        # Track whether we created the file so we can remove it again
        # if the writer fails.
        created = not os.path.exists(filename)
        if params.get("append", False):
            # Open also for reading ("+"), because TIFF save_all
            # writer needs to go back and edit the written data.
            fp = builtins.open(filename, "r+b")
        else:
            fp = builtins.open(filename, "w+b")
    try:
        save_handler(self, fp, filename)
    except Exception:
        # Clean up: close and remove a file we created, then re-raise.
        if open_fp:
            fp.close()
        if created:
            try:
                os.remove(filename)
            except PermissionError:
                pass
        raise
    if open_fp:
        fp.close()
def seek(self, frame):
    """
    Seeks to the given frame in this sequence file. If you seek
    beyond the end of the sequence, the method raises an
    ``EOFError`` exception. When a sequence file is opened, the
    library automatically seeks to frame 0.
    See :py:meth:`~PIL.Image.Image.tell`.
    If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
    number of available frames.
    :param frame: Frame number, starting at 0.
    :exception EOFError: If the call attempts to seek beyond the end
       of the sequence.
    """
    # A plain image has exactly one frame; multi-frame file plugins
    # override this method.
    if frame == 0:
        return
    raise EOFError
def show(self, title=None):
    """
    Displays this image. This method is mainly intended for debugging purposes.
    This method calls :py:func:`PIL.ImageShow.show` internally. You can use
    :py:func:`PIL.ImageShow.register` to override its default behaviour.
    The image is first saved to a temporary file. By default, it will be in
    PNG format.
    On Unix, the image is then opened using the **display**, **eog** or
    **xv** utility, depending on which one can be found.
    On macOS, the image is opened with the native Preview application.
    On Windows, the image is opened with the standard PNG display utility.
    :param title: Optional title to use for the image window, where possible.
    """
    # Delegate to the module-level helper; return value is discarded.
    _show(self, title=title)
def split(self):
    """
    Split this image into individual bands. This method returns a
    tuple of individual image bands from an image. For example,
    splitting an "RGB" image creates three new images each
    containing a copy of one of the original bands (red, green,
    blue).
    If you need only one band, :py:meth:`~PIL.Image.Image.getchannel`
    method can be more convenient and faster.
    :returns: A tuple containing bands.
    """
    self.load()
    # Single-band images have nothing to split; return a copy.
    if self.im.bands == 1:
        return (self.copy(),)
    return tuple(self._new(band) for band in self.im.split())
def getchannel(self, channel):
    """
    Returns an image containing a single channel of the source image.
    :param channel: What channel to return. Could be index
      (0 for "R" channel of "RGB") or channel name
      ("A" for alpha channel of "RGBA").
    :returns: An image in "L" mode.
    .. versionadded:: 4.3.0
    """
    self.load()
    index = channel
    if isinstance(index, str):
        # Translate a band name into its numeric index.
        try:
            index = self.getbands().index(index)
        except ValueError as exc:
            msg = f'The image has no channel "{channel}"'
            raise ValueError(msg) from exc
    return self._new(self.im.getband(index))
def tell(self):
    """
    Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
    If defined, :attr:`~PIL.Image.Image.n_frames` refers to the
    number of available frames.
    :returns: Frame number, starting with 0.
    """
    # Single-frame base class; multi-frame plugins override this.
    return 0
def thumbnail(self, size, resample=Resampling.BICUBIC, reducing_gap=2.0):
    """
    Make this image into a thumbnail.  This method modifies the
    image to contain a thumbnail version of itself, no larger than
    the given size.  This method calculates an appropriate thumbnail
    size to preserve the aspect of the image, calls the
    :py:meth:`~PIL.Image.Image.draft` method to configure the file reader
    (where applicable), and finally resizes the image.
    Note that this function modifies the :py:class:`~PIL.Image.Image`
    object in place.  If you need to use the full resolution image as well,
    apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
    image.
    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param resample: Optional resampling filter.  This can be one
       of :py:data:`Resampling.NEAREST`, :py:data:`Resampling.BOX`,
       :py:data:`Resampling.BILINEAR`, :py:data:`Resampling.HAMMING`,
       :py:data:`Resampling.BICUBIC` or :py:data:`Resampling.LANCZOS`.
       If omitted, it defaults to :py:data:`Resampling.BICUBIC`.
       (was :py:data:`Resampling.NEAREST` prior to version 2.5.0).
       See: :ref:`concept-filters`.
    :param reducing_gap: Apply optimization by resizing the image
       in two steps. First, reducing the image by integer times
       using :py:meth:`~PIL.Image.Image.reduce` or
       :py:meth:`~PIL.Image.Image.draft` for JPEG images.
       Second, resizing using regular resampling. The last step
       changes size no less than by ``reducing_gap`` times.
       ``reducing_gap`` may be None (no first step is performed)
       or should be greater than 1.0. The bigger ``reducing_gap``,
       the closer the result to the fair resampling.
       The smaller ``reducing_gap``, the faster resizing.
       With ``reducing_gap`` greater or equal to 3.0, the result is
       indistinguishable from fair resampling in most cases.
       The default value is 2.0 (very close to fair resampling
       while still being faster in many cases).
    :returns: None
    """
    provided_size = tuple(map(math.floor, size))
    def preserve_aspect_ratio():
        # Returns the target size with the image's aspect ratio
        # preserved, or None when no resize is needed.
        def round_aspect(number, key):
            # Round toward whichever neighbour keeps the ratio closest,
            # never below 1 pixel.
            return max(min(math.floor(number), math.ceil(number), key=key), 1)
        x, y = provided_size
        if x >= self.width and y >= self.height:
            return
        aspect = self.width / self.height
        if x / y >= aspect:
            x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
        else:
            y = round_aspect(
                x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n)
            )
        return x, y
    box = None
    if reducing_gap is not None:
        size = preserve_aspect_ratio()
        if size is None:
            return
        # Let the file reader (e.g. JPEG) decode at reduced scale.
        res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
        if res is not None:
            box = res[1]
    if box is None:
        self.load()
        # load() may have changed the size of the image
        size = preserve_aspect_ratio()
        if size is None:
            return
    if self.size != size:
        im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)
        # Adopt the resized core image in place.
        self.im = im.im
        self._size = size
        self.mode = self.im.mode
    self.readonly = 0
    self.pyaccess = None
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(
    self,
    size,
    method,
    data=None,
    resample=Resampling.NEAREST,
    fill=1,
    fillcolor=None,
):
    """
    Transforms this image.  This method creates a new image with the
    given size, and the same mode as the original, and copies data
    to the new image using the given transform.
    :param size: The output size in pixels, as a 2-tuple:
       (width, height).
    :param method: The transformation method.  This is one of
      :py:data:`Transform.EXTENT` (cut out a rectangular subregion),
      :py:data:`Transform.AFFINE` (affine transform),
      :py:data:`Transform.PERSPECTIVE` (perspective transform),
      :py:data:`Transform.QUAD` (map a quadrilateral to a rectangle), or
      :py:data:`Transform.MESH` (map a number of source quadrilaterals
      in one operation).
      It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
      object::
        class Example(Image.ImageTransformHandler):
            def transform(self, size, data, resample, fill=1):
                # Return result
      It may also be an object with a ``method.getdata`` method
      that returns a tuple supplying new ``method`` and ``data`` values::
        class Example:
            def getdata(self):
                method = Image.Transform.EXTENT
                data = (0, 0, 100, 100)
                return method, data
    :param data: Extra data to the transformation method.
    :param resample: Optional resampling filter.  It can be one of
       :py:data:`Resampling.NEAREST` (use nearest neighbour),
       :py:data:`Resampling.BILINEAR` (linear interpolation in a 2x2
       environment), or :py:data:`Resampling.BICUBIC` (cubic spline
       interpolation in a 4x4 environment). If omitted, or if the image
       has mode "1" or "P", it is set to :py:data:`Resampling.NEAREST`.
       See: :ref:`concept-filters`.
    :param fill: If ``method`` is an
      :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
      the arguments passed to it. Otherwise, it is unused.
    :param fillcolor: Optional fill color for the area outside the
       transform in the output image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if self.mode in ("LA", "RGBA") and resample != Resampling.NEAREST:
        # Transform in premultiplied alpha to avoid edge halos,
        # then convert back.
        return (
            self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode])
            .transform(size, method, data, resample, fill, fillcolor)
            .convert(self.mode)
        )
    if isinstance(method, ImageTransformHandler):
        return method.transform(size, self, resample=resample, fill=fill)
    if hasattr(method, "getdata"):
        # compatibility w. old-style transform objects
        method, data = method.getdata()
    if data is None:
        msg = "missing method data"
        raise ValueError(msg)
    im = new(self.mode, size, fillcolor)
    if self.mode == "P" and self.palette:
        im.palette = self.palette.copy()
    im.info = self.info.copy()
    if method == Transform.MESH:
        # list of quads: each (destination box, source quad) pair is
        # applied as an individual QUAD transform.
        for box, quad in data:
            im.__transformer(
                box, self, Transform.QUAD, quad, resample, fillcolor is None
            )
    else:
        im.__transformer(
            (0, 0) + size, self, method, data, resample, fillcolor is None
        )
    return im
def __transformer(
    self, box, image, method, data, resample=Resampling.NEAREST, fill=1
):
    # Internal helper for transform(): normalizes every supported method
    # into the coefficient form the core transform2 routine expects,
    # then renders `image` into the `box` region of self.
    w = box[2] - box[0]
    h = box[3] - box[1]
    if method == Transform.AFFINE:
        data = data[:6]
    elif method == Transform.EXTENT:
        # convert extent to an affine transform
        x0, y0, x1, y1 = data
        xs = (x1 - x0) / w
        ys = (y1 - y0) / h
        method = Transform.AFFINE
        data = (xs, 0, x0, 0, ys, y0)
    elif method == Transform.PERSPECTIVE:
        data = data[:8]
    elif method == Transform.QUAD:
        # quadrilateral warp.  data specifies the four corners
        # given as NW, SW, SE, and NE.
        nw = data[:2]
        sw = data[2:4]
        se = data[4:6]
        ne = data[6:8]
        x0, y0 = nw
        # Bilinear interpolation coefficients across the quad.
        As = 1.0 / w
        At = 1.0 / h
        data = (
            x0,
            (ne[0] - x0) * As,
            (sw[0] - x0) * At,
            (se[0] - sw[0] - ne[0] + x0) * As * At,
            y0,
            (ne[1] - y0) * As,
            (sw[1] - y0) * At,
            (se[1] - sw[1] - ne[1] + y0) * As * At,
        )
    else:
        msg = "unknown transformation method"
        raise ValueError(msg)
    if resample not in (
        Resampling.NEAREST,
        Resampling.BILINEAR,
        Resampling.BICUBIC,
    ):
        # Downscaling-only filters are valid elsewhere but not here;
        # name them explicitly in the error.
        if resample in (Resampling.BOX, Resampling.HAMMING, Resampling.LANCZOS):
            msg = {
                Resampling.BOX: "Image.Resampling.BOX",
                Resampling.HAMMING: "Image.Resampling.HAMMING",
                Resampling.LANCZOS: "Image.Resampling.LANCZOS",
            }[resample] + f" ({resample}) cannot be used."
        else:
            msg = f"Unknown resampling filter ({resample})."
        filters = [
            f"{filter[1]} ({filter[0]})"
            for filter in (
                (Resampling.NEAREST, "Image.Resampling.NEAREST"),
                (Resampling.BILINEAR, "Image.Resampling.BILINEAR"),
                (Resampling.BICUBIC, "Image.Resampling.BICUBIC"),
            )
        ]
        msg += " Use " + ", ".join(filters[:-1]) + " or " + filters[-1]
        raise ValueError(msg)
    image.load()
    self.load()
    if image.mode in ("1", "P"):
        resample = Resampling.NEAREST
    self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
    """
    Transpose image (flip or rotate in 90 degree steps)
    :param method: One of :py:data:`Transpose.FLIP_LEFT_RIGHT`,
      :py:data:`Transpose.FLIP_TOP_BOTTOM`, :py:data:`Transpose.ROTATE_90`,
      :py:data:`Transpose.ROTATE_180`, :py:data:`Transpose.ROTATE_270`,
      :py:data:`Transpose.TRANSPOSE` or :py:data:`Transpose.TRANSVERSE`.
    :returns: Returns a flipped or rotated copy of this image.
    """
    self.load()
    # The core layer performs the actual flip/rotate and returns a copy.
    return self._new(self.im.transpose(method))
def effect_spread(self, distance):
"""
Randomly spread pixels in an image.
:param distance: Distance to spread pixels.
"""
self.load()
return self._new(self.im.effect_spread(distance))
def toqimage(self):
"""Returns a QImage copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
msg = "Qt bindings are not installed"
raise ImportError(msg)
return ImageQt.toqimage(self)
def toqpixmap(self):
"""Returns a QPixmap copy of this image"""
from . import ImageQt
if not ImageQt.qt_is_installed:
msg = "Qt bindings are not installed"
raise ImportError(msg)
return ImageQt.toqpixmap(self)
The code snippet above provides the dependencies needed to implement the `_patch_colormap_display` function. Write a Python function `def _patch_colormap_display()` that solves the following problem:
Simplify the rich display of matplotlib color maps in a notebook.
Here is the function:
def _patch_colormap_display():
    """Simplify the rich display of matplotlib color maps in a notebook.

    Monkey-patches ``mpl.colors.Colormap`` with ``_repr_png_`` and
    ``_repr_html_`` methods so that IPython/Jupyter render a colormap
    object as a horizontal gradient strip instead of a bare repr.
    """
    def _repr_png_(self):
        """Generate a PNG representation of the Colormap."""
        import io

        import numpy as np
        from PIL import Image

        IMAGE_SIZE = (400, 50)
        # Horizontal 0..1 ramp, repeated down every row of the strip.
        X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))
        # bytes=True makes the colormap return uint8 RGBA pixels for PIL.
        pixels = self(X, bytes=True)
        png_bytes = io.BytesIO()
        Image.fromarray(pixels).save(png_bytes, format='png')
        return png_bytes.getvalue()

    def _repr_html_(self):
        """Generate an HTML representation of the Colormap."""
        import base64

        png_bytes = self._repr_png_()
        png_base64 = base64.b64encode(png_bytes).decode('ascii')
        # Bug fix: the original concatenation omitted the space between the
        # title and src attributes, emitting invalid HTML (title="..."src=...).
        return (
            f'<img alt="{self.name} color map" '
            f'title="{self.name}" '
            f'src="data:image/png;base64,{png_base64}">'
        )

    mpl.colors.Colormap._repr_png_ = _repr_png_
    mpl.colors.Colormap._repr_html_ = _repr_html_
import colorsys
from itertools import cycle
import numpy as np
import matplotlib as mpl
from .external import husl
from .utils import desaturate, get_color_cycle
from .colors import xkcd_rgb, crayons
from ._compat import get_colormap
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
    """Return a list of colors or continuous colormap defining a palette.

    Possible ``palette`` values include:
        - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
        - Name of matplotlib colormap
        - 'husl' or 'hls'
        - 'ch:<cubehelix arguments>'
        - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',
        - A sequence of colors in any format matplotlib accepts

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle.

    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.

    See the :ref:`tutorial <palette_tutorial>` for more information.

    Parameters
    ----------
    palette : None, string, or sequence, optional
        Name of palette or None to return current palette. If a sequence, input
        colors are used but possibly cycled and desaturated.
    n_colors : int, optional
        Number of colors in the palette. If ``None``, the default will depend
        on how ``palette`` is specified. Named palettes default to 6 colors,
        but grabbing the current palette or passing in a list of colors will
        not change the number of colors unless this is specified. Asking for
        more colors than exist in the palette will cause it to cycle. Ignored
        when ``as_cmap`` is True.
    desat : float, optional
        Proportion to desaturate each color by.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    See Also
    --------
    set_palette : Set the default color cycle for all plots.
    set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
                      colors from one of the seaborn palettes.

    Examples
    --------
    .. include:: ../docstrings/color_palette.rst

    """
    if palette is None:
        # No palette given: use whatever matplotlib is currently cycling.
        palette = get_color_cycle()
        if n_colors is None:
            n_colors = len(palette)

    elif not isinstance(palette, str):
        # A sequence of colors was passed through directly.
        palette = palette
        if n_colors is None:
            n_colors = len(palette)

    else:
        if n_colors is None:
            # Use all colors in a qualitative palette or 6 of another kind
            n_colors = QUAL_PALETTE_SIZES.get(palette, 6)

        if palette in SEABORN_PALETTES:
            # Named "seaborn variant" of matplotlib default color cycle
            palette = SEABORN_PALETTES[palette]

        elif palette == "hls":
            # Evenly spaced colors in cylindrical RGB space
            palette = hls_palette(n_colors, as_cmap=as_cmap)

        elif palette == "husl":
            # Evenly spaced colors in cylindrical Lab space
            palette = husl_palette(n_colors, as_cmap=as_cmap)

        elif palette.lower() == "jet":
            # Paternalism
            raise ValueError("No.")

        elif palette.startswith("ch:"):
            # Cubehelix palette with params specified in string
            args, kwargs = _parse_cubehelix_args(palette)
            palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)

        elif palette.startswith("light:"):
            # light palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)

        elif palette.startswith("dark:"):
            # dark palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)

        elif palette.startswith("blend:"):
            # blend palette between colors specified in string
            _, colors = palette.split(":")
            colors = colors.split(",")
            palette = blend_palette(colors, n_colors, as_cmap=as_cmap)

        else:
            try:
                # Perhaps a named matplotlib colormap?
                palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
            except (ValueError, KeyError):  # Error class changed in mpl36
                raise ValueError(f"{palette!r} is not a valid palette name")

    if desat is not None:
        palette = [desaturate(c, desat) for c in palette]

    if not as_cmap:

        # Always return as many colors as we asked for
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors)]

        # Always return in r, g, b tuple format
        try:
            # Bug fix: convert eagerly in a single assignment (rather than
            # binding `palette` to a lazy `map` object first) so that, if a
            # color fails to parse, `palette` still refers to the input
            # colors and the error message below stays informative instead
            # of interpolating a `<map object ...>` repr.
            palette = _ColorPalette(
                mpl.colors.colorConverter.to_rgb(c) for c in palette
            )
        except ValueError:
            raise ValueError(f"Could not generate a palette for {palette}")

    return palette
xkcd_rgb = {'acid green': '#8ffe09',
'adobe': '#bd6c48',
'algae': '#54ac68',
'algae green': '#21c36f',
'almost black': '#070d0d',
'amber': '#feb308',
'amethyst': '#9b5fc0',
'apple': '#6ecb3c',
'apple green': '#76cd26',
'apricot': '#ffb16d',
'aqua': '#13eac9',
'aqua blue': '#02d8e9',
'aqua green': '#12e193',
'aqua marine': '#2ee8bb',
'aquamarine': '#04d8b2',
'army green': '#4b5d16',
'asparagus': '#77ab56',
'aubergine': '#3d0734',
'auburn': '#9a3001',
'avocado': '#90b134',
'avocado green': '#87a922',
'azul': '#1d5dec',
'azure': '#069af3',
'baby blue': '#a2cffe',
'baby green': '#8cff9e',
'baby pink': '#ffb7ce',
'baby poo': '#ab9004',
'baby poop': '#937c00',
'baby poop green': '#8f9805',
'baby puke green': '#b6c406',
'baby purple': '#ca9bf7',
'baby shit brown': '#ad900d',
'baby shit green': '#889717',
'banana': '#ffff7e',
'banana yellow': '#fafe4b',
'barbie pink': '#fe46a5',
'barf green': '#94ac02',
'barney': '#ac1db8',
'barney purple': '#a00498',
'battleship grey': '#6b7c85',
'beige': '#e6daa6',
'berry': '#990f4b',
'bile': '#b5c306',
'black': '#000000',
'bland': '#afa88b',
'blood': '#770001',
'blood orange': '#fe4b03',
'blood red': '#980002',
'blue': '#0343df',
'blue blue': '#2242c7',
'blue green': '#137e6d',
'blue grey': '#607c8e',
'blue purple': '#5729ce',
'blue violet': '#5d06e9',
'blue with a hint of purple': '#533cc6',
'blue/green': '#0f9b8e',
'blue/grey': '#758da3',
'blue/purple': '#5a06ef',
'blueberry': '#464196',
'bluegreen': '#017a79',
'bluegrey': '#85a3b2',
'bluey green': '#2bb179',
'bluey grey': '#89a0b0',
'bluey purple': '#6241c7',
'bluish': '#2976bb',
'bluish green': '#10a674',
'bluish grey': '#748b97',
'bluish purple': '#703be7',
'blurple': '#5539cc',
'blush': '#f29e8e',
'blush pink': '#fe828c',
'booger': '#9bb53c',
'booger green': '#96b403',
'bordeaux': '#7b002c',
'boring green': '#63b365',
'bottle green': '#044a05',
'brick': '#a03623',
'brick orange': '#c14a09',
'brick red': '#8f1402',
'bright aqua': '#0bf9ea',
'bright blue': '#0165fc',
'bright cyan': '#41fdfe',
'bright green': '#01ff07',
'bright lavender': '#c760ff',
'bright light blue': '#26f7fd',
'bright light green': '#2dfe54',
'bright lilac': '#c95efb',
'bright lime': '#87fd05',
'bright lime green': '#65fe08',
'bright magenta': '#ff08e8',
'bright olive': '#9cbb04',
'bright orange': '#ff5b00',
'bright pink': '#fe01b1',
'bright purple': '#be03fd',
'bright red': '#ff000d',
'bright sea green': '#05ffa6',
'bright sky blue': '#02ccfe',
'bright teal': '#01f9c6',
'bright turquoise': '#0ffef9',
'bright violet': '#ad0afd',
'bright yellow': '#fffd01',
'bright yellow green': '#9dff00',
'british racing green': '#05480d',
'bronze': '#a87900',
'brown': '#653700',
'brown green': '#706c11',
'brown grey': '#8d8468',
'brown orange': '#b96902',
'brown red': '#922b05',
'brown yellow': '#b29705',
'brownish': '#9c6d57',
'brownish green': '#6a6e09',
'brownish grey': '#86775f',
'brownish orange': '#cb7723',
'brownish pink': '#c27e79',
'brownish purple': '#76424e',
'brownish red': '#9e3623',
'brownish yellow': '#c9b003',
'browny green': '#6f6c0a',
'browny orange': '#ca6b02',
'bruise': '#7e4071',
'bubble gum pink': '#ff69af',
'bubblegum': '#ff6cb5',
'bubblegum pink': '#fe83cc',
'buff': '#fef69e',
'burgundy': '#610023',
'burnt orange': '#c04e01',
'burnt red': '#9f2305',
'burnt siena': '#b75203',
'burnt sienna': '#b04e0f',
'burnt umber': '#a0450e',
'burnt yellow': '#d5ab09',
'burple': '#6832e3',
'butter': '#ffff81',
'butter yellow': '#fffd74',
'butterscotch': '#fdb147',
'cadet blue': '#4e7496',
'camel': '#c69f59',
'camo': '#7f8f4e',
'camo green': '#526525',
'camouflage green': '#4b6113',
'canary': '#fdff63',
'canary yellow': '#fffe40',
'candy pink': '#ff63e9',
'caramel': '#af6f09',
'carmine': '#9d0216',
'carnation': '#fd798f',
'carnation pink': '#ff7fa7',
'carolina blue': '#8ab8fe',
'celadon': '#befdb7',
'celery': '#c1fd95',
'cement': '#a5a391',
'cerise': '#de0c62',
'cerulean': '#0485d1',
'cerulean blue': '#056eee',
'charcoal': '#343837',
'charcoal grey': '#3c4142',
'chartreuse': '#c1f80a',
'cherry': '#cf0234',
'cherry red': '#f7022a',
'chestnut': '#742802',
'chocolate': '#3d1c02',
'chocolate brown': '#411900',
'cinnamon': '#ac4f06',
'claret': '#680018',
'clay': '#b66a50',
'clay brown': '#b2713d',
'clear blue': '#247afd',
'cloudy blue': '#acc2d9',
'cobalt': '#1e488f',
'cobalt blue': '#030aa7',
'cocoa': '#875f42',
'coffee': '#a6814c',
'cool blue': '#4984b8',
'cool green': '#33b864',
'cool grey': '#95a3a6',
'copper': '#b66325',
'coral': '#fc5a50',
'coral pink': '#ff6163',
'cornflower': '#6a79f7',
'cornflower blue': '#5170d7',
'cranberry': '#9e003a',
'cream': '#ffffc2',
'creme': '#ffffb6',
'crimson': '#8c000f',
'custard': '#fffd78',
'cyan': '#00ffff',
'dandelion': '#fedf08',
'dark': '#1b2431',
'dark aqua': '#05696b',
'dark aquamarine': '#017371',
'dark beige': '#ac9362',
'dark blue': '#00035b',
'dark blue green': '#005249',
'dark blue grey': '#1f3b4d',
'dark brown': '#341c02',
'dark coral': '#cf524e',
'dark cream': '#fff39a',
'dark cyan': '#0a888a',
'dark forest green': '#002d04',
'dark fuchsia': '#9d0759',
'dark gold': '#b59410',
'dark grass green': '#388004',
'dark green': '#033500',
'dark green blue': '#1f6357',
'dark grey': '#363737',
'dark grey blue': '#29465b',
'dark hot pink': '#d90166',
'dark indigo': '#1f0954',
'dark khaki': '#9b8f55',
'dark lavender': '#856798',
'dark lilac': '#9c6da5',
'dark lime': '#84b701',
'dark lime green': '#7ebd01',
'dark magenta': '#960056',
'dark maroon': '#3c0008',
'dark mauve': '#874c62',
'dark mint': '#48c072',
'dark mint green': '#20c073',
'dark mustard': '#a88905',
'dark navy': '#000435',
'dark navy blue': '#00022e',
'dark olive': '#373e02',
'dark olive green': '#3c4d03',
'dark orange': '#c65102',
'dark pastel green': '#56ae57',
'dark peach': '#de7e5d',
'dark periwinkle': '#665fd1',
'dark pink': '#cb416b',
'dark plum': '#3f012c',
'dark purple': '#35063e',
'dark red': '#840000',
'dark rose': '#b5485d',
'dark royal blue': '#02066f',
'dark sage': '#598556',
'dark salmon': '#c85a53',
'dark sand': '#a88f59',
'dark sea green': '#11875d',
'dark seafoam': '#1fb57a',
'dark seafoam green': '#3eaf76',
'dark sky blue': '#448ee4',
'dark slate blue': '#214761',
'dark tan': '#af884a',
'dark taupe': '#7f684e',
'dark teal': '#014d4e',
'dark turquoise': '#045c5a',
'dark violet': '#34013f',
'dark yellow': '#d5b60a',
'dark yellow green': '#728f02',
'darkblue': '#030764',
'darkgreen': '#054907',
'darkish blue': '#014182',
'darkish green': '#287c37',
'darkish pink': '#da467d',
'darkish purple': '#751973',
'darkish red': '#a90308',
'deep aqua': '#08787f',
'deep blue': '#040273',
'deep brown': '#410200',
'deep green': '#02590f',
'deep lavender': '#8d5eb7',
'deep lilac': '#966ebd',
'deep magenta': '#a0025c',
'deep orange': '#dc4d01',
'deep pink': '#cb0162',
'deep purple': '#36013f',
'deep red': '#9a0200',
'deep rose': '#c74767',
'deep sea blue': '#015482',
'deep sky blue': '#0d75f8',
'deep teal': '#00555a',
'deep turquoise': '#017374',
'deep violet': '#490648',
'denim': '#3b638c',
'denim blue': '#3b5b92',
'desert': '#ccad60',
'diarrhea': '#9f8303',
'dirt': '#8a6e45',
'dirt brown': '#836539',
'dirty blue': '#3f829d',
'dirty green': '#667e2c',
'dirty orange': '#c87606',
'dirty pink': '#ca7b80',
'dirty purple': '#734a65',
'dirty yellow': '#cdc50a',
'dodger blue': '#3e82fc',
'drab': '#828344',
'drab green': '#749551',
'dried blood': '#4b0101',
'duck egg blue': '#c3fbf4',
'dull blue': '#49759c',
'dull brown': '#876e4b',
'dull green': '#74a662',
'dull orange': '#d8863b',
'dull pink': '#d5869d',
'dull purple': '#84597e',
'dull red': '#bb3f3f',
'dull teal': '#5f9e8f',
'dull yellow': '#eedc5b',
'dusk': '#4e5481',
'dusk blue': '#26538d',
'dusky blue': '#475f94',
'dusky pink': '#cc7a8b',
'dusky purple': '#895b7b',
'dusky rose': '#ba6873',
'dust': '#b2996e',
'dusty blue': '#5a86ad',
'dusty green': '#76a973',
'dusty lavender': '#ac86a8',
'dusty orange': '#f0833a',
'dusty pink': '#d58a94',
'dusty purple': '#825f87',
'dusty red': '#b9484e',
'dusty rose': '#c0737a',
'dusty teal': '#4c9085',
'earth': '#a2653e',
'easter green': '#8cfd7e',
'easter purple': '#c071fe',
'ecru': '#feffca',
'egg shell': '#fffcc4',
'eggplant': '#380835',
'eggplant purple': '#430541',
'eggshell': '#ffffd4',
'eggshell blue': '#c4fff7',
'electric blue': '#0652ff',
'electric green': '#21fc0d',
'electric lime': '#a8ff04',
'electric pink': '#ff0490',
'electric purple': '#aa23ff',
'emerald': '#01a049',
'emerald green': '#028f1e',
'evergreen': '#05472a',
'faded blue': '#658cbb',
'faded green': '#7bb274',
'faded orange': '#f0944d',
'faded pink': '#de9dac',
'faded purple': '#916e99',
'faded red': '#d3494e',
'faded yellow': '#feff7f',
'fawn': '#cfaf7b',
'fern': '#63a950',
'fern green': '#548d44',
'fire engine red': '#fe0002',
'flat blue': '#3c73a8',
'flat green': '#699d4c',
'fluorescent green': '#08ff08',
'fluro green': '#0aff02',
'foam green': '#90fda9',
'forest': '#0b5509',
'forest green': '#06470c',
'forrest green': '#154406',
'french blue': '#436bad',
'fresh green': '#69d84f',
'frog green': '#58bc08',
'fuchsia': '#ed0dd9',
'gold': '#dbb40c',
'golden': '#f5bf03',
'golden brown': '#b27a01',
'golden rod': '#f9bc08',
'golden yellow': '#fec615',
'goldenrod': '#fac205',
'grape': '#6c3461',
'grape purple': '#5d1451',
'grapefruit': '#fd5956',
'grass': '#5cac2d',
'grass green': '#3f9b0b',
'grassy green': '#419c03',
'green': '#15b01a',
'green apple': '#5edc1f',
'green blue': '#06b48b',
'green brown': '#544e03',
'green grey': '#77926f',
'green teal': '#0cb577',
'green yellow': '#c9ff27',
'green/blue': '#01c08d',
'green/yellow': '#b5ce08',
'greenblue': '#23c48b',
'greenish': '#40a368',
'greenish beige': '#c9d179',
'greenish blue': '#0b8b87',
'greenish brown': '#696112',
'greenish cyan': '#2afeb7',
'greenish grey': '#96ae8d',
'greenish tan': '#bccb7a',
'greenish teal': '#32bf84',
'greenish turquoise': '#00fbb0',
'greenish yellow': '#cdfd02',
'greeny blue': '#42b395',
'greeny brown': '#696006',
'greeny grey': '#7ea07a',
'greeny yellow': '#c6f808',
'grey': '#929591',
'grey blue': '#6b8ba4',
'grey brown': '#7f7053',
'grey green': '#789b73',
'grey pink': '#c3909b',
'grey purple': '#826d8c',
'grey teal': '#5e9b8a',
'grey/blue': '#647d8e',
'grey/green': '#86a17d',
'greyblue': '#77a1b5',
'greyish': '#a8a495',
'greyish blue': '#5e819d',
'greyish brown': '#7a6a4f',
'greyish green': '#82a67d',
'greyish pink': '#c88d94',
'greyish purple': '#887191',
'greyish teal': '#719f91',
'gross green': '#a0bf16',
'gunmetal': '#536267',
'hazel': '#8e7618',
'heather': '#a484ac',
'heliotrope': '#d94ff5',
'highlighter green': '#1bfc06',
'hospital green': '#9be5aa',
'hot green': '#25ff29',
'hot magenta': '#f504c9',
'hot pink': '#ff028d',
'hot purple': '#cb00f5',
'hunter green': '#0b4008',
'ice': '#d6fffa',
'ice blue': '#d7fffe',
'icky green': '#8fae22',
'indian red': '#850e04',
'indigo': '#380282',
'indigo blue': '#3a18b1',
'iris': '#6258c4',
'irish green': '#019529',
'ivory': '#ffffcb',
'jade': '#1fa774',
'jade green': '#2baf6a',
'jungle green': '#048243',
'kelley green': '#009337',
'kelly green': '#02ab2e',
'kermit green': '#5cb200',
'key lime': '#aeff6e',
'khaki': '#aaa662',
'khaki green': '#728639',
'kiwi': '#9cef43',
'kiwi green': '#8ee53f',
'lavender': '#c79fef',
'lavender blue': '#8b88f8',
'lavender pink': '#dd85d7',
'lawn green': '#4da409',
'leaf': '#71aa34',
'leaf green': '#5ca904',
'leafy green': '#51b73b',
'leather': '#ac7434',
'lemon': '#fdff52',
'lemon green': '#adf802',
'lemon lime': '#bffe28',
'lemon yellow': '#fdff38',
'lichen': '#8fb67b',
'light aqua': '#8cffdb',
'light aquamarine': '#7bfdc7',
'light beige': '#fffeb6',
'light blue': '#95d0fc',
'light blue green': '#7efbb3',
'light blue grey': '#b7c9e2',
'light bluish green': '#76fda8',
'light bright green': '#53fe5c',
'light brown': '#ad8150',
'light burgundy': '#a8415b',
'light cyan': '#acfffc',
'light eggplant': '#894585',
'light forest green': '#4f9153',
'light gold': '#fddc5c',
'light grass green': '#9af764',
'light green': '#96f97b',
'light green blue': '#56fca2',
'light greenish blue': '#63f7b4',
'light grey': '#d8dcd6',
'light grey blue': '#9dbcd4',
'light grey green': '#b7e1a1',
'light indigo': '#6d5acf',
'light khaki': '#e6f2a2',
'light lavendar': '#efc0fe',
'light lavender': '#dfc5fe',
'light light blue': '#cafffb',
'light light green': '#c8ffb0',
'light lilac': '#edc8ff',
'light lime': '#aefd6c',
'light lime green': '#b9ff66',
'light magenta': '#fa5ff7',
'light maroon': '#a24857',
'light mauve': '#c292a1',
'light mint': '#b6ffbb',
'light mint green': '#a6fbb2',
'light moss green': '#a6c875',
'light mustard': '#f7d560',
'light navy': '#155084',
'light navy blue': '#2e5a88',
'light neon green': '#4efd54',
'light olive': '#acbf69',
'light olive green': '#a4be5c',
'light orange': '#fdaa48',
'light pastel green': '#b2fba5',
'light pea green': '#c4fe82',
'light peach': '#ffd8b1',
'light periwinkle': '#c1c6fc',
'light pink': '#ffd1df',
'light plum': '#9d5783',
'light purple': '#bf77f6',
'light red': '#ff474c',
'light rose': '#ffc5cb',
'light royal blue': '#3a2efe',
'light sage': '#bcecac',
'light salmon': '#fea993',
'light sea green': '#98f6b0',
'light seafoam': '#a0febf',
'light seafoam green': '#a7ffb5',
'light sky blue': '#c6fcff',
'light tan': '#fbeeac',
'light teal': '#90e4c1',
'light turquoise': '#7ef4cc',
'light urple': '#b36ff6',
'light violet': '#d6b4fc',
'light yellow': '#fffe7a',
'light yellow green': '#ccfd7f',
'light yellowish green': '#c2ff89',
'lightblue': '#7bc8f6',
'lighter green': '#75fd63',
'lighter purple': '#a55af4',
'lightgreen': '#76ff7b',
'lightish blue': '#3d7afd',
'lightish green': '#61e160',
'lightish purple': '#a552e6',
'lightish red': '#fe2f4a',
'lilac': '#cea2fd',
'liliac': '#c48efd',
'lime': '#aaff32',
'lime green': '#89fe05',
'lime yellow': '#d0fe1d',
'lipstick': '#d5174e',
'lipstick red': '#c0022f',
'macaroni and cheese': '#efb435',
'magenta': '#c20078',
'mahogany': '#4a0100',
'maize': '#f4d054',
'mango': '#ffa62b',
'manilla': '#fffa86',
'marigold': '#fcc006',
'marine': '#042e60',
'marine blue': '#01386a',
'maroon': '#650021',
'mauve': '#ae7181',
'medium blue': '#2c6fbb',
'medium brown': '#7f5112',
'medium green': '#39ad48',
'medium grey': '#7d7f7c',
'medium pink': '#f36196',
'medium purple': '#9e43a2',
'melon': '#ff7855',
'merlot': '#730039',
'metallic blue': '#4f738e',
'mid blue': '#276ab3',
'mid green': '#50a747',
'midnight': '#03012d',
'midnight blue': '#020035',
'midnight purple': '#280137',
'military green': '#667c3e',
'milk chocolate': '#7f4e1e',
'mint': '#9ffeb0',
'mint green': '#8fff9f',
'minty green': '#0bf77d',
'mocha': '#9d7651',
'moss': '#769958',
'moss green': '#658b38',
'mossy green': '#638b27',
'mud': '#735c12',
'mud brown': '#60460f',
'mud green': '#606602',
'muddy brown': '#886806',
'muddy green': '#657432',
'muddy yellow': '#bfac05',
'mulberry': '#920a4e',
'murky green': '#6c7a0e',
'mushroom': '#ba9e88',
'mustard': '#ceb301',
'mustard brown': '#ac7e04',
'mustard green': '#a8b504',
'mustard yellow': '#d2bd0a',
'muted blue': '#3b719f',
'muted green': '#5fa052',
'muted pink': '#d1768f',
'muted purple': '#805b87',
'nasty green': '#70b23f',
'navy': '#01153e',
'navy blue': '#001146',
'navy green': '#35530a',
'neon blue': '#04d9ff',
'neon green': '#0cff0c',
'neon pink': '#fe019a',
'neon purple': '#bc13fe',
'neon red': '#ff073a',
'neon yellow': '#cfff04',
'nice blue': '#107ab0',
'night blue': '#040348',
'ocean': '#017b92',
'ocean blue': '#03719c',
'ocean green': '#3d9973',
'ocher': '#bf9b0c',
'ochre': '#bf9005',
'ocre': '#c69c04',
'off blue': '#5684ae',
'off green': '#6ba353',
'off white': '#ffffe4',
'off yellow': '#f1f33f',
'old pink': '#c77986',
'old rose': '#c87f89',
'olive': '#6e750e',
'olive brown': '#645403',
'olive drab': '#6f7632',
'olive green': '#677a04',
'olive yellow': '#c2b709',
'orange': '#f97306',
'orange brown': '#be6400',
'orange pink': '#ff6f52',
'orange red': '#fd411e',
'orange yellow': '#ffad01',
'orangeish': '#fd8d49',
'orangered': '#fe420f',
'orangey brown': '#b16002',
'orangey red': '#fa4224',
'orangey yellow': '#fdb915',
'orangish': '#fc824a',
'orangish brown': '#b25f03',
'orangish red': '#f43605',
'orchid': '#c875c4',
'pale': '#fff9d0',
'pale aqua': '#b8ffeb',
'pale blue': '#d0fefe',
'pale brown': '#b1916e',
'pale cyan': '#b7fffa',
'pale gold': '#fdde6c',
'pale green': '#c7fdb5',
'pale grey': '#fdfdfe',
'pale lavender': '#eecffe',
'pale light green': '#b1fc99',
'pale lilac': '#e4cbff',
'pale lime': '#befd73',
'pale lime green': '#b1ff65',
'pale magenta': '#d767ad',
'pale mauve': '#fed0fc',
'pale olive': '#b9cc81',
'pale olive green': '#b1d27b',
'pale orange': '#ffa756',
'pale peach': '#ffe5ad',
'pale pink': '#ffcfdc',
'pale purple': '#b790d4',
'pale red': '#d9544d',
'pale rose': '#fdc1c5',
'pale salmon': '#ffb19a',
'pale sky blue': '#bdf6fe',
'pale teal': '#82cbb2',
'pale turquoise': '#a5fbd5',
'pale violet': '#ceaefa',
'pale yellow': '#ffff84',
'parchment': '#fefcaf',
'pastel blue': '#a2bffe',
'pastel green': '#b0ff9d',
'pastel orange': '#ff964f',
'pastel pink': '#ffbacd',
'pastel purple': '#caa0ff',
'pastel red': '#db5856',
'pastel yellow': '#fffe71',
'pea': '#a4bf20',
'pea green': '#8eab12',
'pea soup': '#929901',
'pea soup green': '#94a617',
'peach': '#ffb07c',
'peachy pink': '#ff9a8a',
'peacock blue': '#016795',
'pear': '#cbf85f',
'periwinkle': '#8e82fe',
'periwinkle blue': '#8f99fb',
'perrywinkle': '#8f8ce7',
'petrol': '#005f6a',
'pig pink': '#e78ea5',
'pine': '#2b5d34',
'pine green': '#0a481e',
'pink': '#ff81c0',
'pink purple': '#db4bda',
'pink red': '#f5054f',
'pink/purple': '#ef1de7',
'pinkish': '#d46a7e',
'pinkish brown': '#b17261',
'pinkish grey': '#c8aca9',
'pinkish orange': '#ff724c',
'pinkish purple': '#d648d7',
'pinkish red': '#f10c45',
'pinkish tan': '#d99b82',
'pinky': '#fc86aa',
'pinky purple': '#c94cbe',
'pinky red': '#fc2647',
'piss yellow': '#ddd618',
'pistachio': '#c0fa8b',
'plum': '#580f41',
'plum purple': '#4e0550',
'poison green': '#40fd14',
'poo': '#8f7303',
'poo brown': '#885f01',
'poop': '#7f5e00',
'poop brown': '#7a5901',
'poop green': '#6f7c00',
'powder blue': '#b1d1fc',
'powder pink': '#ffb2d0',
'primary blue': '#0804f9',
'prussian blue': '#004577',
'puce': '#a57e52',
'puke': '#a5a502',
'puke brown': '#947706',
'puke green': '#9aae07',
'puke yellow': '#c2be0e',
'pumpkin': '#e17701',
'pumpkin orange': '#fb7d07',
'pure blue': '#0203e2',
'purple': '#7e1e9c',
'purple blue': '#632de9',
'purple brown': '#673a3f',
'purple grey': '#866f85',
'purple pink': '#e03fd8',
'purple red': '#990147',
'purple/blue': '#5d21d0',
'purple/pink': '#d725de',
'purpleish': '#98568d',
'purpleish blue': '#6140ef',
'purpleish pink': '#df4ec8',
'purpley': '#8756e4',
'purpley blue': '#5f34e7',
'purpley grey': '#947e94',
'purpley pink': '#c83cb9',
'purplish': '#94568c',
'purplish blue': '#601ef9',
'purplish brown': '#6b4247',
'purplish grey': '#7a687f',
'purplish pink': '#ce5dae',
'purplish red': '#b0054b',
'purply': '#983fb2',
'purply blue': '#661aee',
'purply pink': '#f075e6',
'putty': '#beae8a',
'racing green': '#014600',
'radioactive green': '#2cfa1f',
'raspberry': '#b00149',
'raw sienna': '#9a6200',
'raw umber': '#a75e09',
'really light blue': '#d4ffff',
'red': '#e50000',
'red brown': '#8b2e16',
'red orange': '#fd3c06',
'red pink': '#fa2a55',
'red purple': '#820747',
'red violet': '#9e0168',
'red wine': '#8c0034',
'reddish': '#c44240',
'reddish brown': '#7f2b0a',
'reddish grey': '#997570',
'reddish orange': '#f8481c',
'reddish pink': '#fe2c54',
'reddish purple': '#910951',
'reddy brown': '#6e1005',
'rich blue': '#021bf9',
'rich purple': '#720058',
'robin egg blue': '#8af1fe',
"robin's egg": '#6dedfd',
"robin's egg blue": '#98eff9',
'rosa': '#fe86a4',
'rose': '#cf6275',
'rose pink': '#f7879a',
'rose red': '#be013c',
'rosy pink': '#f6688e',
'rouge': '#ab1239',
'royal': '#0c1793',
'royal blue': '#0504aa',
'royal purple': '#4b006e',
'ruby': '#ca0147',
'russet': '#a13905',
'rust': '#a83c09',
'rust brown': '#8b3103',
'rust orange': '#c45508',
'rust red': '#aa2704',
'rusty orange': '#cd5909',
'rusty red': '#af2f0d',
'saffron': '#feb209',
'sage': '#87ae73',
'sage green': '#88b378',
'salmon': '#ff796c',
'salmon pink': '#fe7b7c',
'sand': '#e2ca76',
'sand brown': '#cba560',
'sand yellow': '#fce166',
'sandstone': '#c9ae74',
'sandy': '#f1da7a',
'sandy brown': '#c4a661',
'sandy yellow': '#fdee73',
'sap green': '#5c8b15',
'sapphire': '#2138ab',
'scarlet': '#be0119',
'sea': '#3c9992',
'sea blue': '#047495',
'sea green': '#53fca1',
'seafoam': '#80f9ad',
'seafoam blue': '#78d1b6',
'seafoam green': '#7af9ab',
'seaweed': '#18d17b',
'seaweed green': '#35ad6b',
'sepia': '#985e2b',
'shamrock': '#01b44c',
'shamrock green': '#02c14d',
'shit': '#7f5f00',
'shit brown': '#7b5804',
'shit green': '#758000',
'shocking pink': '#fe02a2',
'sick green': '#9db92c',
'sickly green': '#94b21c',
'sickly yellow': '#d0e429',
'sienna': '#a9561e',
'silver': '#c5c9c7',
'sky': '#82cafc',
'sky blue': '#75bbfd',
'slate': '#516572',
'slate blue': '#5b7c99',
'slate green': '#658d6d',
'slate grey': '#59656d',
'slime green': '#99cc04',
'snot': '#acbb0d',
'snot green': '#9dc100',
'soft blue': '#6488ea',
'soft green': '#6fc276',
'soft pink': '#fdb0c0',
'soft purple': '#a66fb5',
'spearmint': '#1ef876',
'spring green': '#a9f971',
'spruce': '#0a5f38',
'squash': '#f2ab15',
'steel': '#738595',
'steel blue': '#5a7d9a',
'steel grey': '#6f828a',
'stone': '#ada587',
'stormy blue': '#507b9c',
'straw': '#fcf679',
'strawberry': '#fb2943',
'strong blue': '#0c06f7',
'strong pink': '#ff0789',
'sun yellow': '#ffdf22',
'sunflower': '#ffc512',
'sunflower yellow': '#ffda03',
'sunny yellow': '#fff917',
'sunshine yellow': '#fffd37',
'swamp': '#698339',
'swamp green': '#748500',
'tan': '#d1b26f',
'tan brown': '#ab7e4c',
'tan green': '#a9be70',
'tangerine': '#ff9408',
'taupe': '#b9a281',
'tea': '#65ab7c',
'tea green': '#bdf8a3',
'teal': '#029386',
'teal blue': '#01889f',
'teal green': '#25a36f',
'tealish': '#24bca8',
'tealish green': '#0cdc73',
'terra cotta': '#c9643b',
'terracota': '#cb6843',
'terracotta': '#ca6641',
'tiffany blue': '#7bf2da',
'tomato': '#ef4026',
'tomato red': '#ec2d01',
'topaz': '#13bbaf',
'toupe': '#c7ac7d',
'toxic green': '#61de2a',
'tree green': '#2a7e19',
'true blue': '#010fcc',
'true green': '#089404',
'turquoise': '#06c2ac',
'turquoise blue': '#06b1c4',
'turquoise green': '#04f489',
'turtle green': '#75b84f',
'twilight': '#4e518b',
'twilight blue': '#0a437a',
'ugly blue': '#31668a',
'ugly brown': '#7d7103',
'ugly green': '#7a9703',
'ugly pink': '#cd7584',
'ugly purple': '#a442a0',
'ugly yellow': '#d0c101',
'ultramarine': '#2000b1',
'ultramarine blue': '#1805db',
'umber': '#b26400',
'velvet': '#750851',
'vermillion': '#f4320c',
'very dark blue': '#000133',
'very dark brown': '#1d0200',
'very dark green': '#062e03',
'very dark purple': '#2a0134',
'very light blue': '#d5ffff',
'very light brown': '#d3b683',
'very light green': '#d1ffbd',
'very light pink': '#fff4f2',
'very light purple': '#f6cefc',
'very pale blue': '#d6fffe',
'very pale green': '#cffdbc',
'vibrant blue': '#0339f8',
'vibrant green': '#0add08',
'vibrant purple': '#ad03de',
'violet': '#9a0eea',
'violet blue': '#510ac9',
'violet pink': '#fb5ffc',
'violet red': '#a50055',
'viridian': '#1e9167',
'vivid blue': '#152eff',
'vivid green': '#2fef10',
'vivid purple': '#9900fa',
'vomit': '#a2a415',
'vomit green': '#89a203',
'vomit yellow': '#c7c10c',
'warm blue': '#4b57db',
'warm brown': '#964e02',
'warm grey': '#978a84',
'warm pink': '#fb5581',
'warm purple': '#952e8f',
'washed out green': '#bcf5a6',
'water blue': '#0e87cc',
'watermelon': '#fd4659',
'weird green': '#3ae57f',
'wheat': '#fbdd7e',
'white': '#ffffff',
'windows blue': '#3778bf',
'wine': '#80013f',
'wine red': '#7b0323',
'wintergreen': '#20f986',
'wisteria': '#a87dc2',
'yellow': '#ffff14',
'yellow brown': '#b79400',
'yellow green': '#c0fb2d',
'yellow ochre': '#cb9d06',
'yellow orange': '#fcb001',
'yellow tan': '#ffe36e',
'yellow/green': '#c8fd3d',
'yellowgreen': '#bbf90f',
'yellowish': '#faee66',
'yellowish brown': '#9b7a01',
'yellowish green': '#b0dd16',
'yellowish orange': '#ffab0f',
'yellowish tan': '#fcfc81',
'yellowy brown': '#ae8b0c',
'yellowy green': '#bff128'}
The code snippet above provides the dependencies needed to implement the `xkcd_palette` function. Write a Python function `def xkcd_palette(colors)` that solves the following problem:
Make a palette with color names from the xkcd color survey. See xkcd for the full list of colors: https://xkcd.com/color/rgb/ This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary. Parameters ---------- colors : list of strings List of keys in the `seaborn.xkcd_rgb` dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- crayon_palette : Make a palette with Crayola crayon colors.
Here is the function:
def xkcd_palette(colors):
    """Make a palette with color names from the xkcd color survey.

    See xkcd for the full list of colors: https://xkcd.com/color/rgb/

    This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary.

    Parameters
    ----------
    colors : list of strings
        List of keys in the `seaborn.xkcd_rgb` dictionary.

    Returns
    -------
    palette
        A list of colors as RGB tuples.

    See Also
    --------
    crayon_palette : Make a palette with Crayola crayon colors.

    """
    # Look up each requested name's hex code, then hand the hex colors
    # off to color_palette for conversion into RGB tuples.
    selected = list(map(xkcd_rgb.__getitem__, colors))
    return color_palette(selected, len(selected))
import colorsys
from itertools import cycle
import numpy as np
import matplotlib as mpl
from .external import husl
from .utils import desaturate, get_color_cycle
from .colors import xkcd_rgb, crayons
from ._compat import get_colormap
def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):
    """Return a list of colors or continuous colormap defining a palette.

    Possible ``palette`` values include:

    - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)
    - Name of matplotlib colormap
    - 'husl' or 'hls'
    - 'ch:<cubehelix arguments>'
    - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>'
    - A sequence of colors in any format matplotlib accepts

    Calling this function with ``palette=None`` will return the current
    matplotlib color cycle.

    This function can also be used in a ``with`` statement to temporarily
    set the color cycle for a plot or set of plots.

    See the :ref:`tutorial <palette_tutorial>` for more information.

    Parameters
    ----------
    palette : None, string, or sequence, optional
        Name of palette or None to return current palette. If a sequence, input
        colors are used but possibly cycled and desaturated.
    n_colors : int, optional
        Number of colors in the palette. If ``None``, the default will depend
        on how ``palette`` is specified. Named palettes default to 6 colors,
        but grabbing the current palette or passing in a list of colors will
        not change the number of colors unless this is specified. Asking for
        more colors than exist in the palette will cause it to cycle. Ignored
        when ``as_cmap`` is True.
    desat : float, optional
        Proportion to desaturate each color by.
    as_cmap : bool
        If True, return a :class:`matplotlib.colors.ListedColormap`.

    Returns
    -------
    list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    Raises
    ------
    ValueError
        If ``palette`` is not a recognized name or cannot be converted to
        RGB colors.

    See Also
    --------
    set_palette : Set the default color cycle for all plots.
    set_color_codes : Reassign color codes like ``"b"``, ``"g"``, etc. to
                      colors from one of the seaborn palettes.

    Examples
    --------
    .. include:: ../docstrings/color_palette.rst
    """
    if palette is None:
        # Default to whatever the active matplotlib property cycle holds.
        palette = get_color_cycle()
        if n_colors is None:
            n_colors = len(palette)
    elif not isinstance(palette, str):
        # A sequence of colors was passed directly; only the length may
        # need a default. (A previous revision also had a redundant
        # ``palette = palette`` self-assignment here.)
        if n_colors is None:
            n_colors = len(palette)
    else:
        if n_colors is None:
            # Use all colors in a qualitative palette or 6 of another kind
            n_colors = QUAL_PALETTE_SIZES.get(palette, 6)
        if palette in SEABORN_PALETTES:
            # Named "seaborn variant" of matplotlib default color cycle
            palette = SEABORN_PALETTES[palette]
        elif palette == "hls":
            # Evenly spaced colors in cylindrical RGB space
            palette = hls_palette(n_colors, as_cmap=as_cmap)
        elif palette == "husl":
            # Evenly spaced colors in cylindrical Lab space
            palette = husl_palette(n_colors, as_cmap=as_cmap)
        elif palette.lower() == "jet":
            # Paternalism
            raise ValueError("No.")
        elif palette.startswith("ch:"):
            # Cubehelix palette with params specified in string
            args, kwargs = _parse_cubehelix_args(palette)
            palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)
        elif palette.startswith("light:"):
            # light palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
        elif palette.startswith("dark:"):
            # dark palette to color specified in string
            _, color = palette.split(":")
            reverse = color.endswith("_r")
            if reverse:
                color = color[:-2]
            palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)
        elif palette.startswith("blend:"):
            # blend palette between colors specified in string
            _, colors = palette.split(":")
            colors = colors.split(",")
            palette = blend_palette(colors, n_colors, as_cmap=as_cmap)
        else:
            try:
                # Perhaps a named matplotlib colormap?
                palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)
            except (ValueError, KeyError):  # Error class changed in mpl36
                raise ValueError(f"{palette!r} is not a valid palette name")
    if desat is not None:
        palette = [desaturate(c, desat) for c in palette]
    if not as_cmap:
        # Always return as many colors as we asked for
        pal_cycle = cycle(palette)
        palette = [next(pal_cycle) for _ in range(n_colors)]
        # Always return in r, g, b tuple format
        try:
            palette = map(mpl.colors.colorConverter.to_rgb, palette)
            palette = _ColorPalette(palette)
        except ValueError:
            raise ValueError(f"Could not generate a palette for {palette}")
    return palette
# Crayola crayon color names mapped to their hex codes, as listed at
# https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors
crayons = {'Almond': '#EFDECD',
           'Antique Brass': '#CD9575',
           'Apricot': '#FDD9B5',
           'Aquamarine': '#78DBE2',
           'Asparagus': '#87A96B',
           'Atomic Tangerine': '#FFA474',
           'Banana Mania': '#FAE7B5',
           'Beaver': '#9F8170',
           'Bittersweet': '#FD7C6E',
           'Black': '#000000',
           'Blue': '#1F75FE',
           'Blue Bell': '#A2A2D0',
           'Blue Green': '#0D98BA',
           'Blue Violet': '#7366BD',
           'Blush': '#DE5D83',
           'Brick Red': '#CB4154',
           'Brown': '#B4674D',
           'Burnt Orange': '#FF7F49',
           'Burnt Sienna': '#EA7E5D',
           'Cadet Blue': '#B0B7C6',
           'Canary': '#FFFF99',
           'Caribbean Green': '#00CC99',
           'Carnation Pink': '#FFAACC',
           'Cerise': '#DD4492',
           'Cerulean': '#1DACD6',
           'Chestnut': '#BC5D58',
           'Copper': '#DD9475',
           'Cornflower': '#9ACEEB',
           'Cotton Candy': '#FFBCD9',
           'Dandelion': '#FDDB6D',
           'Denim': '#2B6CC4',
           'Desert Sand': '#EFCDB8',
           'Eggplant': '#6E5160',
           'Electric Lime': '#CEFF1D',
           'Fern': '#71BC78',
           'Forest Green': '#6DAE81',
           'Fuchsia': '#C364C5',
           'Fuzzy Wuzzy': '#CC6666',
           'Gold': '#E7C697',
           'Goldenrod': '#FCD975',
           'Granny Smith Apple': '#A8E4A0',
           'Gray': '#95918C',
           'Green': '#1CAC78',
           'Green Yellow': '#F0E891',
           'Hot Magenta': '#FF1DCE',
           'Inchworm': '#B2EC5D',
           'Indigo': '#5D76CB',
           'Jazzberry Jam': '#CA3767',
           'Jungle Green': '#3BB08F',
           'Laser Lemon': '#FEFE22',
           'Lavender': '#FCB4D5',
           'Macaroni and Cheese': '#FFBD88',
           'Magenta': '#F664AF',
           'Mahogany': '#CD4A4C',
           'Manatee': '#979AAA',
           'Mango Tango': '#FF8243',
           'Maroon': '#C8385A',
           'Mauvelous': '#EF98AA',
           'Melon': '#FDBCB4',
           'Midnight Blue': '#1A4876',
           'Mountain Meadow': '#30BA8F',
           'Navy Blue': '#1974D2',
           'Neon Carrot': '#FFA343',
           'Olive Green': '#BAB86C',
           'Orange': '#FF7538',
           'Orchid': '#E6A8D7',
           'Outer Space': '#414A4C',
           'Outrageous Orange': '#FF6E4A',
           'Pacific Blue': '#1CA9C9',
           'Peach': '#FFCFAB',
           'Periwinkle': '#C5D0E6',
           'Piggy Pink': '#FDDDE6',
           'Pine Green': '#158078',
           'Pink Flamingo': '#FC74FD',
           'Pink Sherbert': '#F78FA7',
           'Plum': '#8E4585',
           'Purple Heart': '#7442C8',
           "Purple Mountains' Majesty": '#9D81BA',
           'Purple Pizzazz': '#FE4EDA',
           'Radical Red': '#FF496C',
           'Raw Sienna': '#D68A59',
           'Razzle Dazzle Rose': '#FF48D0',
           'Razzmatazz': '#E3256B',
           'Red': '#EE204D',
           'Red Orange': '#FF5349',
           'Red Violet': '#C0448F',
           "Robin's Egg Blue": '#1FCECB',
           'Royal Purple': '#7851A9',
           'Salmon': '#FF9BAA',
           'Scarlet': '#FC2847',
           "Screamin' Green": '#76FF7A',
           'Sea Green': '#93DFB8',
           'Sepia': '#A5694F',
           'Shadow': '#8A795D',
           'Shamrock': '#45CEA2',
           'Shocking Pink': '#FB7EFD',
           'Silver': '#CDC5C2',
           'Sky Blue': '#80DAEB',
           'Spring Green': '#ECEABE',
           'Sunglow': '#FFCF48',
           'Sunset Orange': '#FD5E53',
           'Tan': '#FAA76C',
           'Tickle Me Pink': '#FC89AC',
           'Timberwolf': '#DBD7D2',
           'Tropical Rain Forest': '#17806D',
           'Tumbleweed': '#DEAA88',
           'Turquoise Blue': '#77DDE7',
           'Unmellow Yellow': '#FFFF66',
           'Violet (Purple)': '#926EAE',
           'Violet Red': '#F75394',
           'Vivid Tangerine': '#FFA089',
           'Vivid Violet': '#8F509D',
           'White': '#FFFFFF',
           'Wild Blue Yonder': '#A2ADD0',
           'Wild Strawberry': '#FF43A4',
           'Wild Watermelon': '#FC6C85',
           'Wisteria': '#CDA4DE',
           'Yellow': '#FCE883',
           'Yellow Green': '#C5E384',
           'Yellow Orange': '#FFAE42'}
The provided code snippet includes necessary dependencies for implementing the `crayon_palette` function. Write a Python function `def crayon_palette(colors)` to solve the following problem:
Make a palette with color names from Crayola crayons. Colors are taken from here: https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors This is just a simple wrapper around the `seaborn.crayons` dictionary. Parameters ---------- colors : list of strings List of keys in the `seaborn.crayons` dictionary. Returns ------- palette A list of colors as RGB tuples. See Also -------- xkcd_palette : Make a palette with named colors from the XKCD color survey.
Here is the function:
def crayon_palette(colors):
    """Build a palette from named Crayola crayon colors.

    This is a thin convenience wrapper around the ``seaborn.crayons``
    dictionary; the colors come from
    https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors.

    Parameters
    ----------
    colors : list of strings
        Keys into the ``seaborn.crayons`` dictionary.

    Returns
    -------
    palette
        A list of colors as RGB tuples.

    See Also
    --------
    xkcd_palette : Make a palette with named colors from the XKCD color survey.
    """
    hex_codes = []
    for name in colors:
        hex_codes.append(crayons[name])
    return color_palette(hex_codes, len(hex_codes))
import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
# NOTE(review): typeshed-style stub declarations; ``_SpecialForm`` is defined
# in the stub file these lines were extracted from -- not in this snippet.
Union: _SpecialForm = ...
Optional: _SpecialForm = ...
class SupportsInt(Protocol, metaclass=ABCMeta):
    """Structural type (typeshed stub): any object implementing ``__int__``."""
    def __int__(self) -> int: ...
class Tuple(BaseTypingInstance):
    """Represents a ``typing.Tuple[...]`` annotation during type inference."""
    def _is_homogenous(self):
        # To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
        # is used.
        return self._generics_manager.is_homogenous_tuple()
    def py__simple_getitem__(self, index):
        """Infer the type of ``tup[index]`` for a literal index."""
        if self._is_homogenous():
            # Every element of a homogeneous tuple has the same (single) type.
            return self._generics_manager.get_index_and_execute(0)
        else:
            if isinstance(index, int):
                return self._generics_manager.get_index_and_execute(index)
            debug.dbg('The getitem type on Tuple was %s' % index)
            return NO_VALUES
    def py__iter__(self, contextualized_node=None):
        """Yield lazily-evaluated element types for iteration over the tuple."""
        if self._is_homogenous():
            yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
        else:
            for v in self._generics_manager.to_tuple():
                yield LazyKnownValues(v.execute_annotation())
    def py__getitem__(self, index_value_set, contextualized_node):
        """Infer subscription when the index is itself an inferred value set."""
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        # Non-homogeneous: the result could be any declared element type.
        return ValueSet.from_sets(
            self._generics_manager.to_tuple()
        ).execute_annotation()
    def _get_wrapped_value(self):
        # Delegate everything else to the builtin ``tuple`` class.
        tuple_, = self.inference_state.builtins_module \
            .py__getattribute__('tuple').execute_annotation()
        return tuple_
    def name(self):
        # NOTE(review): upstream this is likely decorated as a property -- confirm.
        return self._wrapped_value.name
    def infer_type_vars(self, value_set):
        """Match incoming values against this annotation to bind type variables."""
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
        value_set = value_set.filter(
            lambda x: x.py__name__().lower() == 'tuple',
        )
        if self._is_homogenous():
            # The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like a iterable sequence
            # rather than a positional container of elements.
            return self._class_value.get_generics()[0].infer_type_vars(
                value_set.merge_types_of_iterate(),
            )
        else:
            # The parameter annotation has only explicit type parameters
            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
            # treat the incoming values as needing to match the annotation
            # exactly, just as we would for non-tuple annotations.
            type_var_dict = {}
            for element in value_set:
                try:
                    method = element.get_annotated_class_object
                except AttributeError:
                    # This might still happen, because the tuple name matching
                    # above is not 100% correct, so just catch the remaining
                    # cases here.
                    continue
                py_class = method()
                merge_type_var_dicts(
                    type_var_dict,
                    merge_pairwise_generics(self._class_value, py_class),
                )
            return type_var_dict
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
return None | null |
import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
# Sort-key type for a parsed PEP 440 local version segment: either the
# NegativeInfinity sentinel (no local part) or a tuple of pairs mixing
# (int, "") and (NegativeInfinity, str) entries.
# NOTE(review): ``NegativeInfinityType`` / ``SubLocalType`` are defined
# elsewhere in the originating module -- not visible in this snippet.
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
# Local version segments may be separated by ".", "_", or "-".
_local_version_separators = re.compile(r"[\._-]")
# NOTE(review): typeshed-style stub line; ``_SpecialForm`` comes from the stub.
Optional: _SpecialForm = ...
The provided code snippet includes necessary dependencies for implementing the `_parse_local_version` function. Write a Python function `def _parse_local_version(local: str) -> Optional[LocalType]` to solve the following problem:
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
Here is the function:
def _parse_local_version(local: str) -> Optional[LocalType]:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is None:
        return None
    # Split on ".", "_" or "-"; numeric segments become ints, the rest
    # are lower-cased strings.
    segments = _local_version_separators.split(local)
    return tuple(
        int(segment) if segment.isdigit() else segment.lower()
        for segment in segments
    )
import collections
import itertools
import re
from typing import Callable, Optional, SupportsInt, Tuple, Union
# Singleton sentinels that compare greater/less than every other value.
# NOTE(review): ``InfinityType`` / ``NegativeInfinityType`` / ``InfiniteTypes``
# are defined elsewhere in the originating module -- not visible here.
Infinity = InfinityType()
NegativeInfinity = NegativeInfinityType()
# Sort-key component for pre-/post-/dev-release segments.
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
# One element of a parsed local version segment.
SubLocalType = Union[InfiniteTypes, int, str]
# Full sort-key type for a local version segment (see _cmpkey).
LocalType = Union[
    NegativeInfinityType,
    Tuple[
        Union[
            SubLocalType,
            Tuple[SubLocalType, str],
            Tuple[NegativeInfinityType, SubLocalType],
        ],
        ...,
    ],
]
# Complete comparison key produced by _cmpkey for PEP 440 version ordering.
CmpKey = Tuple[
    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
# NOTE(review): typeshed-style stub line; ``_SpecialForm`` comes from the stub.
Optional: _SpecialForm = ...
class Tuple(BaseTypingInstance):
    """Represents a ``typing.Tuple[...]`` annotation during type inference."""
    def _is_homogenous(self):
        # To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
        # is used.
        return self._generics_manager.is_homogenous_tuple()
    def py__simple_getitem__(self, index):
        """Infer the type of ``tup[index]`` for a literal index."""
        if self._is_homogenous():
            # Every element of a homogeneous tuple has the same (single) type.
            return self._generics_manager.get_index_and_execute(0)
        else:
            if isinstance(index, int):
                return self._generics_manager.get_index_and_execute(index)
            debug.dbg('The getitem type on Tuple was %s' % index)
            return NO_VALUES
    def py__iter__(self, contextualized_node=None):
        """Yield lazily-evaluated element types for iteration over the tuple."""
        if self._is_homogenous():
            yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
        else:
            for v in self._generics_manager.to_tuple():
                yield LazyKnownValues(v.execute_annotation())
    def py__getitem__(self, index_value_set, contextualized_node):
        """Infer subscription when the index is itself an inferred value set."""
        if self._is_homogenous():
            return self._generics_manager.get_index_and_execute(0)
        # Non-homogeneous: the result could be any declared element type.
        return ValueSet.from_sets(
            self._generics_manager.to_tuple()
        ).execute_annotation()
    def _get_wrapped_value(self):
        # Delegate everything else to the builtin ``tuple`` class.
        tuple_, = self.inference_state.builtins_module \
            .py__getattribute__('tuple').execute_annotation()
        return tuple_
    def name(self):
        # NOTE(review): upstream this is likely decorated as a property -- confirm.
        return self._wrapped_value.name
    def infer_type_vars(self, value_set):
        """Match incoming values against this annotation to bind type variables."""
        # Circular
        from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
        value_set = value_set.filter(
            lambda x: x.py__name__().lower() == 'tuple',
        )
        if self._is_homogenous():
            # The parameter annotation is of the form `Tuple[T, ...]`,
            # so we treat the incoming tuple like a iterable sequence
            # rather than a positional container of elements.
            return self._class_value.get_generics()[0].infer_type_vars(
                value_set.merge_types_of_iterate(),
            )
        else:
            # The parameter annotation has only explicit type parameters
            # (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
            # treat the incoming values as needing to match the annotation
            # exactly, just as we would for non-tuple annotations.
            type_var_dict = {}
            for element in value_set:
                try:
                    method = element.get_annotated_class_object
                except AttributeError:
                    # This might still happen, because the tuple name matching
                    # above is not 100% correct, so just catch the remaining
                    # cases here.
                    continue
                py_class = method()
                merge_type_var_dicts(
                    type_var_dict,
                    merge_pairwise_generics(self._class_value, py_class),
                )
            return type_var_dict
def _cmpkey(
    epoch: int,
    release: Tuple[int, ...],
    pre: Optional[Tuple[str, int]],
    post: Optional[Tuple[str, int]],
    dev: Optional[Tuple[str, int]],
    local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
    """Build the tuple used to order PEP 440 versions.

    Each segment is mapped onto a value that sorts correctly, with the
    Infinity/NegativeInfinity sentinels standing in for absent segments.
    """
    # Releases compare with trailing zeros ignored (1.0 == 1.0.0), so strip
    # them from the end before building the key.
    trimmed = list(release)
    while trimmed and trimmed[-1] == 0:
        trimmed.pop()
    _release = tuple(trimmed)
    # A dev release with no pre/post segment must sort before any
    # pre-release (1.0.dev0 < 1.0a0); abuse the pre slot for that.
    # Otherwise, a missing pre-release sorts after any present one.
    if pre is None:
        _pre: PrePostDevType = (
            NegativeInfinity if (post is None and dev is not None) else Infinity
        )
    else:
        _pre = pre
    # Versions without a post segment sort before those with one.
    _post: PrePostDevType = NegativeInfinity if post is None else post
    # Versions without a development segment sort after those with one.
    _dev: PrePostDevType = Infinity if dev is None else dev
    if local is None:
        # Versions without a local segment sort before those with one.
        _local: LocalType = NegativeInfinity
    else:
        # PEP 440 local-segment ordering: alphanumeric parts sort before and
        # separately from numeric parts; encode each part as a pair so that
        # (NegativeInfinity, str) < (int, "") and same-kind parts compare
        # lexicographically / numerically.
        _local = tuple(
            (part, "") if isinstance(part, int) else (NegativeInfinity, part)
            for part in local
        )
    return epoch, _release, _pre, _post, _dev, _local
import operator
import math
def husl_to_rgb(h, s, l):
    """Convert a HUSL color to RGB via the LCH intermediate space."""
    lch = husl_to_lch([h, s, l])
    return lch_to_rgb(*lch)
def rgb_to_hex(triple):
    """Format an RGB triple of floats as a '#rrggbb' hex string."""
    r, g, b = triple
    channels = rgb_prepare([r, g, b])
    return '#' + ''.join('%02x' % channel for channel in channels)
def husl_to_hex(h, s, l):
    """Convert a HUSL color to a '#rrggbb' hex string."""
    rgb = husl_to_rgb(h, s, l)
    return rgb_to_hex(rgb)
import operator
import math
def rgb_to_husl(r, g, b):
    """Convert an RGB color to HUSL via the LCH intermediate space."""
    lch = rgb_to_lch(r, g, b)
    return lch_to_husl(lch)
def hex_to_rgb(hex):
    """Parse a '#rrggbb' (or 'rrggbb') string into [r, g, b] floats in [0, 1]."""
    digits = hex[1:] if hex.startswith('#') else hex
    # Two hex digits per channel, scaled to the unit interval.
    return [int(digits[i:i + 2], 16) / 255.0 for i in range(0, 6, 2)]
def hex_to_husl(hex):
    """Convert a hex color string to HUSL."""
    r, g, b = hex_to_rgb(hex)
    return rgb_to_husl(r, g, b)
import operator
import math
def huslp_to_rgb(h, s, l):
    """Convert a HUSLp (pastel) color to RGB via the LCH intermediate space."""
    lch = huslp_to_lch([h, s, l])
    return lch_to_rgb(*lch)
def rgb_to_hex(triple):
    """Format an RGB triple of floats as a '#rrggbb' hex string."""
    r, g, b = triple
    channels = tuple(rgb_prepare([r, g, b]))
    return '#%02x%02x%02x' % channels
def huslp_to_hex(h, s, l):
    """Convert a HUSLp (pastel) color to a '#rrggbb' hex string."""
    rgb = huslp_to_rgb(h, s, l)
    return rgb_to_hex(rgb)
import operator
import math
def rgb_to_huslp(r, g, b):
    """Convert an RGB color to HUSLp (pastel) via the LCH intermediate space."""
    lch = rgb_to_lch(r, g, b)
    return lch_to_huslp(lch)
def hex_to_rgb(hex):
    """Parse a '#rrggbb' (or 'rrggbb') string into [r, g, b] floats in [0, 1]."""
    digits = hex[1:] if hex.startswith('#') else hex
    # Two hex digits per channel, scaled to the unit interval.
    return [int(digits[i:i + 2], 16) / 255.0 for i in range(0, 6, 2)]
def hex_to_huslp(hex):
    """Convert a hex color string to HUSLp (pastel)."""
    r, g, b = hex_to_rgb(hex)
    return rgb_to_huslp(r, g, b)
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from collections import namedtuple
from collections.abc import Callable, Mapping
import copy
import sys
The provided code snippet includes necessary dependencies for implementing the `strip_blank_lines` function. Write a Python function `def strip_blank_lines(l)` to solve the following problem:
Remove leading and trailing blank lines from a list of lines
Here is the function:
def strip_blank_lines(l):
    """Drop blank lines from both ends of *l*, in place, and return it."""
    # Trim the front: lines that are empty or whitespace-only.
    while l and not l[0].strip():
        l.pop(0)
    # Then trim the back the same way.
    while l and not l[-1].strip():
        l.pop()
    return l
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from collections import namedtuple
from collections.abc import Callable, Mapping
import copy
import sys
def indent(str, indent=4):
    """Prefix every line of *str* with *indent* spaces.

    A ``None`` input yields just the padding string itself.
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from collections import namedtuple
from collections.abc import Callable, Mapping
import copy
import sys
The provided code snippet includes necessary dependencies for implementing the `dedent_lines` function. Write a Python function `def dedent_lines(lines)` to solve the following problem:
Deindent a list of lines maximally
Here is the function:
def dedent_lines(lines):
    """Strip the longest common leading whitespace from a list of lines."""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from collections import namedtuple
from collections.abc import Callable, Mapping
import copy
import sys
def header(text, style='-'):
    """Return *text* followed by an underline of *style* characters."""
    underline = style * len(text)
    return f"{text}\n{underline}\n"
import sys
import os
The provided code snippet includes necessary dependencies for implementing the `_get_win_folder_from_registry` function. Write a Python function `def _get_win_folder_from_registry(csidl_name)` to solve the following problem:
This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names.
Here is the function:
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import winreg as _winreg

    # Registry value names differ from the CSIDL_* constants.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    # QueryValueEx returns (value, value_type); only the path is needed.
    folder, _value_type = _winreg.QueryValueEx(key, shell_folder_name)
    return folder
import sys
import os
unicode = str  # Python 2 compatibility alias; on Python 3 every str is unicode.
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* folder name to a directory path via pywin32.

    Best-effort: tries to return a unicode string, and downgrades to the
    short (8.3) path name when the path contains high-bit characters.
    """
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)
        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api unavailable: keep the long (possibly unicode) path.
                pass
    except UnicodeError:
        # Conversion failed: fall back to the raw value from the shell API.
        pass
    return dir
import warnings
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._oldcore import (
VectorPlotter,
)
from .utils import (
locator_to_legend_entries,
adjust_legend_subtitles,
_default_color,
_deprecate_ci,
)
from ._statistics import EstimateAggregator
from .axisgrid import FacetGrid, _facet_docs
from ._docstrings import DocstringComponents, _core_docs
class _LinePlotter(_RelationalPlotter):
    """Plotter for line plots: aggregates y over x and draws lines with
    optional error bands/bars, mapping hue/size/style semantics."""
    # Artist attributes that legend entries are derived from.
    _legend_attributes = ["color", "linewidth", "marker", "dashes"]
    _legend_func = "plot"
    def __init__(
        self, *,
        data=None, variables={},
        estimator=None, n_boot=None, seed=None, errorbar=None,
        sort=True, orient="x", err_style=None, err_kws=None, legend=None
    ):
        """Store aggregation/error/orientation options and set up data."""
        # TODO this is messy, we want the mapping to be agnostic about
        # the kind of plot to draw, but for the time being we need to set
        # this information so the SizeMapping can use it
        self._default_size_range = (
            np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
        )
        super().__init__(data=data, variables=variables)
        self.estimator = estimator
        self.errorbar = errorbar
        self.n_boot = n_boot
        self.seed = seed
        self.sort = sort
        self.orient = orient
        self.err_style = err_style
        self.err_kws = {} if err_kws is None else err_kws
        self.legend = legend
    def plot(self, ax, kws):
        """Draw the plot onto an axes, passing matplotlib kwargs."""
        # Draw a test plot, using the passed in kwargs. The goal here is to
        # honor both (a) the current state of the plot cycler and (b) the
        # specified kwargs on all the lines we will draw, overriding when
        # relevant with the data semantics. Note that we won't cycle
        # internally; in other words, if `hue` is not used, all elements will
        # have the same color, but they will have the color that you would have
        # gotten from the corresponding matplotlib function, and calling the
        # function will advance the axes property cycle.
        kws.setdefault("markeredgewidth", kws.pop("mew", .75))
        kws.setdefault("markeredgecolor", kws.pop("mec", "w"))
        # Set default error kwargs
        err_kws = self.err_kws.copy()
        if self.err_style == "band":
            err_kws.setdefault("alpha", .2)
        elif self.err_style == "bars":
            pass
        elif self.err_style is not None:
            err = "`err_style` must be 'band' or 'bars', not {}"
            raise ValueError(err.format(self.err_style))
        # Initialize the aggregation object
        agg = EstimateAggregator(
            self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
        )
        # TODO abstract variable to aggregate over here-ish. Better name?
        orient = self.orient
        if orient not in {"x", "y"}:
            err = f"`orient` must be either 'x' or 'y', not {orient!r}."
            raise ValueError(err)
        other = {"x": "y", "y": "x"}[orient]
        # TODO How to handle NA? We don't want NA to propagate through to the
        # estimate/CI when some values are present, but we would also like
        # matplotlib to show "gaps" in the line when all values are missing.
        # This is straightforward absent aggregation, but complicated with it.
        # If we want to use nas, we need to conditionalize dropna in iter_data.
        # Loop over the semantic subsets and add to the plot
        grouping_vars = "hue", "size", "style"
        for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
            if self.sort:
                sort_vars = ["units", orient, other]
                sort_cols = [var for var in sort_vars if var in self.variables]
                sub_data = sub_data.sort_values(sort_cols)
            if (
                self.estimator is not None
                and sub_data[orient].value_counts().max() > 1
            ):
                if "units" in self.variables:
                    # TODO eventually relax this constraint
                    err = "estimator must be None when specifying units"
                    raise ValueError(err)
                grouped = sub_data.groupby(orient, sort=self.sort)
                # Could pass as_index=False instead of reset_index,
                # but that fails on a corner case with older pandas.
                sub_data = grouped.apply(agg, other).reset_index()
            else:
                # No aggregation: fill the interval columns with NaN so the
                # error-drawing code below has nothing to draw.
                sub_data[f"{other}min"] = np.nan
                sub_data[f"{other}max"] = np.nan
            # TODO this is pretty ad hoc ; see GH2409
            for var in "xy":
                if self._log_scaled(var):
                    for col in sub_data.filter(regex=f"^{var}"):
                        sub_data[col] = np.power(10, sub_data[col])
            # --- Draw the main line(s)
            if "units" in self.variables:  # XXX why not add to grouping variables?
                lines = []
                for _, unit_data in sub_data.groupby("units"):
                    lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
            else:
                lines = ax.plot(sub_data["x"], sub_data["y"], **kws)
            for line in lines:
                if "hue" in sub_vars:
                    line.set_color(self._hue_map(sub_vars["hue"]))
                if "size" in sub_vars:
                    line.set_linewidth(self._size_map(sub_vars["size"]))
                if "style" in sub_vars:
                    attributes = self._style_map(sub_vars["style"])
                    if "dashes" in attributes:
                        line.set_dashes(attributes["dashes"])
                    if "marker" in attributes:
                        line.set_marker(attributes["marker"])
            # Take error-artist styling from the last line drawn for this subset.
            line_color = line.get_color()
            line_alpha = line.get_alpha()
            line_capstyle = line.get_solid_capstyle()
            # --- Draw the confidence intervals
            if self.estimator is not None and self.errorbar is not None:
                # TODO handling of orientation will need to happen here
                if self.err_style == "band":
                    func = {"x": ax.fill_between, "y": ax.fill_betweenx}[orient]
                    func(
                        sub_data[orient],
                        sub_data[f"{other}min"], sub_data[f"{other}max"],
                        color=line_color, **err_kws
                    )
                elif self.err_style == "bars":
                    error_param = {
                        f"{other}err": (
                            sub_data[other] - sub_data[f"{other}min"],
                            sub_data[f"{other}max"] - sub_data[other],
                        )
                    }
                    ebars = ax.errorbar(
                        sub_data["x"], sub_data["y"], **error_param,
                        linestyle="", color=line_color, alpha=line_alpha,
                        **err_kws
                    )
                    # Set the capstyle properly on the error bars
                    for obj in ebars.get_children():
                        if isinstance(obj, mpl.collections.LineCollection):
                            obj.set_capstyle(line_capstyle)
        # Finalize the axes details
        self._add_axis_labels(ax)
        if self.legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                legend = ax.legend(title=self.legend_title)
                adjust_legend_subtitles(legend)
class _ScatterPlotter(_RelationalPlotter):
    """Plotter for scatter plots, mapping hue/size/style semantics onto
    the points of a single matplotlib scatter artist."""
    # Artist attributes that legend entries are derived from.
    _legend_attributes = ["color", "s", "marker"]
    _legend_func = "scatter"
    def __init__(self, *, data=None, variables={}, legend=None):
        """Set up plot data and the default marker-size range."""
        # TODO this is messy, we want the mapping to be agnostic about
        # the kind of plot to draw, but for the time being we need to set
        # this information so the SizeMapping can use it
        self._default_size_range = (
            np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
        )
        super().__init__(data=data, variables=variables)
        self.legend = legend
    def plot(self, ax, kws):
        """Draw the scatter plot onto an axes, passing matplotlib kwargs."""
        # --- Determine the visual attributes of the plot
        data = self.plot_data.dropna()
        if data.empty:
            return
        # Define the vectors of x and y positions
        empty = np.full(len(data), np.nan)
        x = data.get("x", empty)
        y = data.get("y", empty)
        if "style" in self.variables:
            # Use a representative marker so scatter sets the edgecolor
            # properly for line art markers. We currently enforce either
            # all or none line art so this works.
            example_level = self._style_map.levels[0]
            example_marker = self._style_map(example_level, "marker")
            kws.setdefault("marker", example_marker)
        # Conditionally set the marker edgecolor based on whether the marker is "filled"
        # See https://github.com/matplotlib/matplotlib/issues/17849 for context
        m = kws.get("marker", mpl.rcParams.get("marker", "o"))
        if not isinstance(m, mpl.markers.MarkerStyle):
            # TODO in more recent matplotlib (which?) can pass a MarkerStyle here
            m = mpl.markers.MarkerStyle(m)
        if m.is_filled():
            kws.setdefault("edgecolor", "w")
        # Draw the scatter plot
        points = ax.scatter(x=x, y=y, **kws)
        # Apply the mapping from semantic variables to artist attributes
        if "hue" in self.variables:
            points.set_facecolors(self._hue_map(data["hue"]))
        if "size" in self.variables:
            points.set_sizes(self._size_map(data["size"]))
        if "style" in self.variables:
            p = [self._style_map(val, "path") for val in data["style"]]
            points.set_paths(p)
        # Apply dependent default attributes
        if "linewidth" not in kws:
            sizes = points.get_sizes()
            points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))
        # Finalize the axes details
        self._add_axis_labels(ax)
        if self.legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                legend = ax.legend(title=self.legend_title)
                adjust_legend_subtitles(legend)
def lineplot(
    data=None, *,
    x=None, y=None, hue=None, size=None, style=None, units=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    dashes=True, markers=None, style_order=None,
    estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None,
    orient="x", sort=True, err_style="band", err_kws=None,
    legend="auto", ci="deprecated", ax=None, **kwargs
):
    # Handle deprecation of ci parameter
    errorbar = _deprecate_ci(errorbar, ci)
    # NOTE: get_semantics inspects locals(), so this call must happen
    # before any additional local variables are bound in this frame.
    variables = _LinePlotter.get_semantics(locals())
    p = _LinePlotter(
        data=data, variables=variables,
        estimator=estimator, n_boot=n_boot, seed=seed, errorbar=errorbar,
        sort=sort, orient=orient, err_style=err_style, err_kws=err_kws,
        legend=legend,
    )
    # Resolve the semantic mappings from the user-supplied specifications
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
    p.map_style(markers=markers, dashes=dashes, order=style_order)
    if ax is None:
        ax = plt.gca()
    # With no style mapping and no explicit linestyle, default to solid lines
    if style is None and not {"ls", "linestyle"} & set(kwargs):  # XXX
        kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
    # Nothing to draw without x/y data; return the (possibly new) axes
    if not p.has_xy_data:
        return ax
    p._attach(ax)
    # Other functions have color as an explicit param,
    # and we should probably do that here too
    color = kwargs.pop("color", kwargs.pop("c", None))
    kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
    p.plot(ax, kwargs)
    return ax
# Build the public docstring from the shared documentation templates
lineplot.__doc__ = """\
Draw a line plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
By default, the plot aggregates over multiple `y` values at each value of
`x` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
    Grouping variable that will produce lines with different colors.
    Can be either categorical or numeric, although color mapping will
    behave differently in latter case.
size : vector or key in `data`
    Grouping variable that will produce lines with different widths.
    Can be either categorical or numeric, although size mapping will
    behave differently in latter case.
style : vector or key in `data`
    Grouping variable that will produce lines with different dashes
    and/or markers. Can have a numeric dtype but will always be treated
    as categorical.
{params.rel.units}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.dashes}
{params.rel.markers}
{params.rel.style_order}
{params.rel.estimator}
{params.stat.errorbar}
{params.rel.n_boot}
{params.rel.seed}
orient : "x" or "y"
    Dimension along which the data are sorted / aggregated. Equivalently,
    the "independent variable" of the resulting function.
sort : boolean
    If True, the data will be sorted by the x and y variables, otherwise
    lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars"
    Whether to draw the confidence intervals with translucent error bands
    or discrete error bars.
err_kws : dict of keyword arguments
    Additional parameters to control the aesthetics of the error bars. The
    kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
    or :meth:`matplotlib.axes.Axes.errorbar`, depending on `err_style`.
{params.rel.legend}
{params.rel.ci}
{params.core.ax}
kwargs : key, value mappings
    Other keyword arguments are passed down to
    :meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.scatterplot}
{seealso.pointplot}
Examples
--------
.. include:: ../docstrings/lineplot.rst
""".format(
    narrative=_relational_narrative,
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def scatterplot(
    data=None, *,
    x=None, y=None, hue=None, size=None, style=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    markers=True, style_order=None, legend="auto", ax=None,
    **kwargs
):
    # NOTE: get_semantics inspects locals(), so this call must happen
    # before any additional local variables are bound in this frame.
    variables = _ScatterPlotter.get_semantics(locals())
    p = _ScatterPlotter(data=data, variables=variables, legend=legend)
    # Resolve the semantic mappings from the user-supplied specifications
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
    p.map_style(markers=markers, order=style_order)
    if ax is None:
        ax = plt.gca()
    # Nothing to draw without x/y data; return the (possibly new) axes
    if not p.has_xy_data:
        return ax
    p._attach(ax)
    # Other functions have color as an explicit param,
    # and we should probably do that here too
    color = kwargs.pop("color", None)
    kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
    p.plot(ax, kwargs)
    return ax
# Build the public docstring from the shared documentation templates
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
    Grouping variable that will produce points with different colors.
    Can be either categorical or numeric, although color mapping will
    behave differently in latter case.
size : vector or key in `data`
    Grouping variable that will produce points with different sizes.
    Can be either categorical or numeric, although size mapping will
    behave differently in latter case.
style : vector or key in `data`
    Grouping variable that will produce points with different markers.
    Can have a numeric dtype but will always be treated as categorical.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
    Other keyword arguments are passed down to
    :meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
    narrative=_relational_narrative,
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
class FacetGrid(Grid):
    """Multi-plot grid for plotting conditional relationships."""
    def __init__(
        self, data, *,
        row=None, col=None, hue=None, col_wrap=None,
        sharex=True, sharey=True, height=3, aspect=1, palette=None,
        row_order=None, col_order=None, hue_order=None, hue_kws=None,
        dropna=False, legend_out=True, despine=True,
        margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
        gridspec_kws=None,
    ):
        super().__init__()
        # Determine the hue facet layer information
        hue_var = hue
        if hue is None:
            hue_names = None
        else:
            hue_names = categorical_order(data[hue], hue_order)
        colors = self._get_palette(data, hue, hue_order, palette)
        # Set up the lists of names for the row and column facet variables
        if row is None:
            row_names = []
        else:
            row_names = categorical_order(data[row], row_order)
        if col is None:
            col_names = []
        else:
            col_names = categorical_order(data[col], col_order)
        # Additional dict of kwarg -> list of values for mapping the hue var
        hue_kws = hue_kws if hue_kws is not None else {}
        # Make a boolean mask that is True anywhere there is an NA
        # value in one of the faceting variables, but only if dropna is True
        none_na = np.zeros(len(data), bool)
        if dropna:
            row_na = none_na if row is None else data[row].isnull()
            col_na = none_na if col is None else data[col].isnull()
            hue_na = none_na if hue is None else data[hue].isnull()
            not_na = ~(row_na | col_na | hue_na)
        else:
            not_na = ~none_na
        # Compute the grid shape
        ncol = 1 if col is None else len(col_names)
        nrow = 1 if row is None else len(row_names)
        self._n_facets = ncol * nrow
        self._col_wrap = col_wrap
        if col_wrap is not None:
            if row is not None:
                err = "Cannot use `row` and `col_wrap` together."
                raise ValueError(err)
            ncol = col_wrap
            nrow = int(np.ceil(len(col_names) / col_wrap))
        # NOTE: _ncol/_nrow are also assigned again further down with the
        # same values; this early assignment appears redundant.
        self._ncol = ncol
        self._nrow = nrow
        # Calculate the base figure size
        # This can get stretched later by a legend
        # TODO this doesn't account for axis labels
        figsize = (ncol * height * aspect, nrow * height)
        # Validate some inputs
        if col_wrap is not None:
            margin_titles = False
        # Build the subplot keyword dictionary
        subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
        gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
        if xlim is not None:
            subplot_kws["xlim"] = xlim
        if ylim is not None:
            subplot_kws["ylim"] = ylim
        # --- Initialize the subplot grid
        # Suppress automatic layout engines while building the figure
        with _disable_autolayout():
            fig = plt.figure(figsize=figsize)
        if col_wrap is None:
            kwargs = dict(squeeze=False,
                          sharex=sharex, sharey=sharey,
                          subplot_kw=subplot_kws,
                          gridspec_kw=gridspec_kws)
            axes = fig.subplots(nrow, ncol, **kwargs)
            # Map facet level(s) -> Axes for the public axes_dict property
            if col is None and row is None:
                axes_dict = {}
            elif col is None:
                axes_dict = dict(zip(row_names, axes.flat))
            elif row is None:
                axes_dict = dict(zip(col_names, axes.flat))
            else:
                facet_product = product(row_names, col_names)
                axes_dict = dict(zip(facet_product, axes.flat))
        else:
            # If wrapping the col variable we need to make the grid ourselves
            if gridspec_kws:
                warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
            n_axes = len(col_names)
            axes = np.empty(n_axes, object)
            axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
            if sharex:
                subplot_kws["sharex"] = axes[0]
            if sharey:
                subplot_kws["sharey"] = axes[0]
            for i in range(1, n_axes):
                axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
            axes_dict = dict(zip(col_names, axes))
        # --- Set up the class attributes
        # Attributes that are part of the public API but accessed through
        # a property so that Sphinx adds them to the auto class doc
        self._figure = fig
        self._axes = axes
        self._axes_dict = axes_dict
        self._legend = None
        # Public attributes that aren't explicitly documented
        # (It's not obvious that having them be public was a good idea)
        self.data = data
        self.row_names = row_names
        self.col_names = col_names
        self.hue_names = hue_names
        self.hue_kws = hue_kws
        # Next the private variables
        self._nrow = nrow
        self._row_var = row
        self._ncol = ncol
        self._col_var = col
        self._margin_titles = margin_titles
        self._margin_titles_texts = []
        self._col_wrap = col_wrap
        self._hue_var = hue_var
        self._colors = colors
        self._legend_out = legend_out
        self._legend_data = {}
        self._x_var = None
        self._y_var = None
        self._sharex = sharex
        self._sharey = sharey
        self._dropna = dropna
        self._not_na = not_na
        # --- Make the axes look good
        self.set_titles()
        self.tight_layout()
        if despine:
            self.despine()
        # Hide redundant interior tick labels when axes are shared
        if sharex in [True, 'col']:
            for ax in self._not_bottom_axes:
                for label in ax.get_xticklabels():
                    label.set_visible(False)
                ax.xaxis.offsetText.set_visible(False)
                ax.xaxis.label.set_visible(False)
        if sharey in [True, 'row']:
            for ax in self._not_left_axes:
                for label in ax.get_yticklabels():
                    label.set_visible(False)
                ax.yaxis.offsetText.set_visible(False)
                ax.yaxis.label.set_visible(False)
    # Fill the constructor docstring from the shared template dict
    __init__.__doc__ = dedent("""\
    Initialize the matplotlib figure and FacetGrid object.
    This class maps a dataset onto multiple axes arrayed in a grid of rows
    and columns that correspond to *levels* of variables in the dataset.
    The plots it produces are often called "lattice", "trellis", or
    "small-multiple" graphics.
    It can also represent levels of a third variable with the ``hue``
    parameter, which plots different subsets of data in different colors.
    This uses color to resolve elements on a third dimension, but only
    draws subsets on top of each other and will not tailor the ``hue``
    parameter for the specific visualization the way that axes-level
    functions that accept ``hue`` will.
    The basic workflow is to initialize the :class:`FacetGrid` object with
    the dataset and the variables that are used to structure the grid. Then
    one or more plotting functions can be applied to each subset by calling
    :meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
    plot can be tweaked with other methods to do things like change the
    axis labels, use different ticks, or add a legend. See the detailed
    code examples below for more information.
    .. warning::
        When using seaborn functions that infer semantic mappings from a
        dataset, care must be taken to synchronize those mappings across
        facets (e.g., by defining the ``hue`` mapping with a palette dict or
        setting the data type of the variables to ``category``). In most cases,
        it will be better to use a figure-level function (e.g. :func:`relplot`
        or :func:`catplot`) than to use :class:`FacetGrid` directly.
    See the :ref:`tutorial <grid_tutorial>` for more information.
    Parameters
    ----------
    {data}
    row, col, hue : strings
        Variables that define subsets of the data, which will be drawn on
        separate facets in the grid. See the ``{{var}}_order`` parameters to
        control the order of levels of this variable.
    {col_wrap}
    {share_xy}
    {height}
    {aspect}
    {palette}
    {{row,col,hue}}_order : lists
        Order for the levels of the faceting variables. By default, this
        will be the order that the levels appear in ``data`` or, if the
        variables are pandas categoricals, the category order.
    hue_kws : dictionary of param -> list of values mapping
        Other keyword arguments to insert into the plotting call to let
        other plot attributes vary across levels of the hue variable (e.g.
        the markers in a scatterplot).
    {legend_out}
    despine : boolean
        Remove the top and right spines from the plots.
    {margin_titles}
    {{x, y}}lim: tuples
        Limits for each of the axes on each facet (only relevant when
        share{{x, y}} is True).
    subplot_kws : dict
        Dictionary of keyword arguments passed to matplotlib subplot(s)
        methods.
    gridspec_kws : dict
        Dictionary of keyword arguments passed to
        :class:`matplotlib.gridspec.GridSpec`
        (via :meth:`matplotlib.figure.Figure.subplots`).
        Ignored if ``col_wrap`` is not ``None``.
    See Also
    --------
    PairGrid : Subplot grid for plotting pairwise relationships
    relplot : Combine a relational plot and a :class:`FacetGrid`
    displot : Combine a distribution plot and a :class:`FacetGrid`
    catplot : Combine a categorical plot and a :class:`FacetGrid`
    lmplot : Combine a regression plot and a :class:`FacetGrid`
    Examples
    --------
    .. note::
        These examples use seaborn functions to demonstrate some of the
        advanced features of the class, but in most cases you will want
        to use figue-level functions (e.g. :func:`displot`, :func:`relplot`)
        to make the plots shown here.
    .. include:: ../docstrings/FacetGrid.rst
    """).format(**_facet_docs)
def facet_data(self):
    """Generator for name indices and data subsets for each facet.
    Yields
    ------
    (i, j, k), data_ijk : tuple of ints, DataFrame
        The ints provide an index into the {row, col, hue}_names attribute,
        and the dataframe contains a subset of the full data corresponding
        to each facet. The generator yields subsets that correspond with
        the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
        is None.
    """
    data = self.data
    # A faceting dimension with no levels matches every row of the data
    all_rows = np.repeat(True, len(data))

    def level_masks(names, var):
        # One boolean mask per level of the faceting variable
        if names:
            return [data[var] == name for name in names]
        return [all_rows]

    row_masks = level_masks(self.row_names, self._row_var)
    col_masks = level_masks(self.col_names, self._col_var)
    hue_masks = level_masks(self.hue_names, self._hue_var)
    # Walk every (row, col, hue) combination and intersect the masks,
    # also dropping rows flagged as NA during construction
    combos = product(
        enumerate(row_masks), enumerate(col_masks), enumerate(hue_masks)
    )
    for (i, row_mask), (j, col_mask), (k, hue_mask) in combos:
        subset = data[row_mask & col_mask & hue_mask & self._not_na]
        yield (i, j, k), subset
def map(self, func, *args, **kwargs):
    """Apply a plotting function to each facet's subset of the data.
    Parameters
    ----------
    func : callable
        A plotting function that takes data and keyword arguments. It
        must plot to the currently active matplotlib Axes and take a
        `color` keyword argument. If faceting on the `hue` dimension,
        it must also take a `label` keyword argument.
    args : strings
        Column names in self.data that identify variables with data to
        plot. The data for each variable is passed to `func` in the
        order the variables are specified in the call.
    kwargs : keyword arguments
        All keyword arguments are passed to the plotting function.
    Returns
    -------
    self : object
        Returns self.
    """
    # If color was a keyword argument, grab it here
    kw_color = kwargs.pop("color", None)
    # How we use the function depends on where it comes from
    func_module = str(getattr(func, "__module__", ""))
    # Check for categorical plots without order information
    if func_module == "seaborn.categorical":
        if "order" not in kwargs:
            warning = ("Using the {} function without specifying "
                       "`order` is likely to produce an incorrect "
                       "plot.".format(func.__name__))
            warnings.warn(warning)
        # NOTE(review): three positional args is used as a heuristic for
        # "x, y, hue" having been passed -- confirm against callers.
        if len(args) == 3 and "hue_order" not in kwargs:
            warning = ("Using the {} function without specifying "
                       "`hue_order` is likely to produce an incorrect "
                       "plot.".format(func.__name__))
            warnings.warn(warning)
    # Iterate over the data subsets
    for (row_i, col_j, hue_k), data_ijk in self.facet_data():
        # If this subset is null, move on
        if not data_ijk.values.size:
            continue
        # Get the current axis
        # (seaborn functions take ax= explicitly, so we avoid touching
        # matplotlib's global current-axes state for them)
        modify_state = not func_module.startswith("seaborn")
        ax = self.facet_axis(row_i, col_j, modify_state)
        # Decide what color to plot with
        kwargs["color"] = self._facet_color(hue_k, kw_color)
        # Insert the other hue aesthetics if appropriate
        for kw, val_list in self.hue_kws.items():
            kwargs[kw] = val_list[hue_k]
        # Insert a label in the keyword arguments for the legend
        if self._hue_var is not None:
            kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
        # Get the actual data we are going to plot with
        plot_data = data_ijk[list(args)]
        if self._dropna:
            plot_data = plot_data.dropna()
        plot_args = [v for k, v in plot_data.items()]
        # Some matplotlib functions don't handle pandas objects correctly
        if func_module.startswith("matplotlib"):
            plot_args = [v.values for v in plot_args]
        # Draw the plot
        self._facet_plot(func, ax, plot_args, kwargs)
    # Finalize the annotations and layout
    self._finalize_grid(args[:2])
    return self
def map_dataframe(self, func, *args, **kwargs):
    """Like ``.map`` but passes args as strings and inserts data in kwargs.
    This method is suitable for plotting with functions that accept a
    long-form DataFrame as a `data` keyword argument and access the
    data in that DataFrame using string variable names.
    Parameters
    ----------
    func : callable
        A plotting function that takes data and keyword arguments. Unlike
        the `map` method, a function used here must "understand" Pandas
        objects. It also must plot to the currently active matplotlib Axes
        and take a `color` keyword argument. If faceting on the `hue`
        dimension, it must also take a `label` keyword argument.
    args : strings
        Column names in self.data that identify variables with data to
        plot. The data for each variable is passed to `func` in the
        order the variables are specified in the call.
    kwargs : keyword arguments
        All keyword arguments are passed to the plotting function.
    Returns
    -------
    self : object
        Returns self.
    """
    # If color was a keyword argument, grab it here
    kw_color = kwargs.pop("color", None)
    # Iterate over the data subsets
    for (row_i, col_j, hue_k), data_ijk in self.facet_data():
        # If this subset is null, move on
        if not data_ijk.values.size:
            continue
        # Get the current axis
        # (seaborn functions take ax= explicitly, so we avoid touching
        # matplotlib's global current-axes state for them)
        modify_state = not str(func.__module__).startswith("seaborn")
        ax = self.facet_axis(row_i, col_j, modify_state)
        # Decide what color to plot with
        kwargs["color"] = self._facet_color(hue_k, kw_color)
        # Insert the other hue aesthetics if appropriate
        for kw, val_list in self.hue_kws.items():
            kwargs[kw] = val_list[hue_k]
        # Insert a label in the keyword arguments for the legend
        if self._hue_var is not None:
            kwargs["label"] = self.hue_names[hue_k]
        # Stick the facet dataframe into the kwargs
        if self._dropna:
            data_ijk = data_ijk.dropna()
        kwargs["data"] = data_ijk
        # Draw the plot
        self._facet_plot(func, ax, args, kwargs)
    # For axis labels, prefer to use positional args for backcompat
    # but also extract the x/y kwargs and use if no corresponding arg
    axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
    for i, val in enumerate(args[:2]):
        axis_labels[i] = val
    self._finalize_grid(axis_labels)
    return self
def _facet_color(self, hue_index, kw_color):
    # An explicit ``color=`` keyword argument takes precedence over the
    # palette entry for this hue level; fall through (returning None)
    # when neither is available.
    palette_color = self._colors[hue_index]
    if kw_color is not None:
        return kw_color
    if palette_color is not None:
        return palette_color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
    # Seaborn functions receive their data through named semantics and an
    # explicit ``ax``; everything else gets the positional args unchanged.
    if str(func.__module__).startswith("seaborn"):
        plot_kwargs = plot_kwargs.copy()
        for name, value in zip(["x", "y", "hue", "size", "style"], plot_args):
            plot_kwargs[name] = value
        plot_args = []
        plot_kwargs["ax"] = ax
    func(*plot_args, **plot_kwargs)
    # Record any legend information the plot contributed
    self._update_legend_data(ax)
def _finalize_grid(self, axlabels):
    """Finalize the annotations and layout.

    Parameters
    ----------
    axlabels : sequence
        Up to two entries (x label, y label) forwarded to
        :meth:`set_axis_labels`; may be shorter when fewer positional
        variables were mapped.
    """
    self.set_axis_labels(*axlabels)
    self.tight_layout()
def facet_axis(self, row_i, col_j, modify_state=True):
    """Make the axis identified by these indices active and return it."""
    # With col_wrap the grid is a flat array indexed by column position
    # only; otherwise it is a 2d (row, col) array.
    if self._col_wrap is None:
        target = self.axes[row_i, col_j]
    else:
        target = self.axes.flat[col_j]
    # Optionally make it matplotlib's "current" axes
    if modify_state:
        plt.sca(target)
    return target
def despine(self, **kwargs):
    """Remove axis spines from the facets.

    Keyword arguments are passed through to the module-level
    ``utils.despine`` function, applied to the whole figure.
    """
    utils.despine(self._figure, **kwargs)
    return self
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
    """Set axis labels on the left column and bottom row of the grid."""
    # Handle each axis the same way: remember the variable name, then
    # delegate to the per-axis labeling method.
    updates = [
        (x_var, "_x_var", self.set_xlabels),
        (y_var, "_y_var", self.set_ylabels),
    ]
    for label, attr, setter in updates:
        if label is not None:
            setattr(self, attr, label)
            setter(label, clear_inner=clear_inner, **kwargs)
    return self
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
    """Label the x axis on the bottom row of the grid."""
    # Default to the variable name stored by set_axis_labels
    text = self._x_var if label is None else label
    for axis in self._bottom_axes:
        axis.set_xlabel(text, **kwargs)
    if clear_inner:
        # Blank out the labels on interior (non-bottom) facets
        for axis in self._not_bottom_axes:
            axis.set_xlabel("")
    return self
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
    """Label the y axis on the left column of the grid."""
    # Default to the variable name stored by set_axis_labels
    text = self._y_var if label is None else label
    for axis in self._left_axes:
        axis.set_ylabel(text, **kwargs)
    if clear_inner:
        # Blank out the labels on interior (non-left) facets
        for axis in self._not_left_axes:
            axis.set_ylabel("")
    return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
    """Set x axis tick labels of the grid.

    Parameters
    ----------
    labels : list of strings, optional
        Labels to use on every facet; when None the current labels are
        re-applied (with ``kwargs``), optionally subsampled by ``step``.
    step : int, optional
        Keep only every ``step``-th tick; only used when ``labels`` is None.
    """
    for ax in self.axes.flat:
        # Re-setting the current ticks pins them to a fixed locator so
        # the labels below stay aligned with the tick positions
        curr_ticks = ax.get_xticks()
        ax.set_xticks(curr_ticks)
        if labels is None:
            curr_labels = [l.get_text() for l in ax.get_xticklabels()]
            if step is not None:
                # Subsample both ticks and labels in lockstep
                xticks = ax.get_xticks()[::step]
                curr_labels = curr_labels[::step]
                ax.set_xticks(xticks)
            ax.set_xticklabels(curr_labels, **kwargs)
        else:
            ax.set_xticklabels(labels, **kwargs)
    return self
def set_yticklabels(self, labels=None, **kwargs):
    """Set y axis tick labels on the left column of the grid."""
    for axis in self.axes.flat:
        # Pin the current tick positions to a fixed locator so the label
        # assignment below lines up with them
        axis.set_yticks(axis.get_yticks())
        if labels is None:
            # Re-apply the existing label text with the new kwargs
            current = [tick.get_text() for tick in axis.get_yticklabels()]
            axis.set_yticklabels(current, **kwargs)
        else:
            axis.set_yticklabels(labels, **kwargs)
    return self
def set_titles(self, template=None, row_template=None, col_template=None,
               **kwargs):
    """Draw titles either above each facet or on the grid margins.
    Parameters
    ----------
    template : string
        Template for all titles with the formatting keys {col_var} and
        {col_name} (if using a `col` faceting variable) and/or {row_var}
        and {row_name} (if using a `row` faceting variable).
    row_template:
        Template for the row variable when titles are drawn on the grid
        margins. Must have {row_var} and {row_name} formatting keys.
    col_template:
        Template for the column variable when titles are drawn on the grid
        margins. Must have {col_var} and {col_name} formatting keys.
    Returns
    -------
    self: object
        Returns self.
    """
    args = dict(row_var=self._row_var, col_var=self._col_var)
    kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
    # Establish default templates
    if row_template is None:
        row_template = "{row_var} = {row_name}"
    if col_template is None:
        col_template = "{col_var} = {col_name}"
    if template is None:
        if self._row_var is None:
            template = col_template
        elif self._col_var is None:
            template = row_template
        else:
            template = " | ".join([row_template, col_template])
    row_template = utils.to_utf8(row_template)
    col_template = utils.to_utf8(col_template)
    template = utils.to_utf8(template)
    if self._margin_titles:
        # Margin-title mode: row titles annotate the right edge and column
        # titles go above the top row; returns early below.
        # Remove any existing title texts
        for text in self._margin_titles_texts:
            text.remove()
        self._margin_titles_texts = []
        if self.row_names is not None:
            # Draw the row titles on the right edge of the grid
            for i, row_name in enumerate(self.row_names):
                ax = self.axes[i, -1]
                args.update(dict(row_name=row_name))
                title = row_template.format(**args)
                text = ax.annotate(
                    title, xy=(1.02, .5), xycoords="axes fraction",
                    rotation=270, ha="left", va="center",
                    **kwargs
                )
                self._margin_titles_texts.append(text)
        if self.col_names is not None:
            # Draw the column titles as normal titles
            for j, col_name in enumerate(self.col_names):
                args.update(dict(col_name=col_name))
                title = col_template.format(**args)
                self.axes[0, j].set_title(title, **kwargs)
        return self
    # Otherwise title each facet with all the necessary information
    if (self._row_var is not None) and (self._col_var is not None):
        for i, row_name in enumerate(self.row_names):
            for j, col_name in enumerate(self.col_names):
                args.update(dict(row_name=row_name, col_name=col_name))
                title = template.format(**args)
                self.axes[i, j].set_title(title, **kwargs)
    elif self.row_names is not None and len(self.row_names):
        for i, row_name in enumerate(self.row_names):
            args.update(dict(row_name=row_name))
            title = template.format(**args)
            self.axes[i, 0].set_title(title, **kwargs)
    elif self.col_names is not None and len(self.col_names):
        for i, col_name in enumerate(self.col_names):
            args.update(dict(col_name=col_name))
            title = template.format(**args)
            # Index the flat array so col_wrap works
            self.axes.flat[i].set_title(title, **kwargs)
    return self
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
    """Add a reference line(s) to each facet.
    Parameters
    ----------
    x, y : numeric
        Value(s) to draw the line(s) at.
    color : :mod:`matplotlib color <matplotlib.colors>`
        Specifies the color of the reference line(s). Pass ``color=None`` to
        use ``hue`` mapping.
    linestyle : str
        Specifies the style of the reference line(s).
    line_kws : key, value mappings
        Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
        when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
        is not None.
    Returns
    -------
    :class:`FacetGrid` instance
        Returns ``self`` for easy method chaining.
    """
    # Fold the explicit style parameters back into the line kwargs and let
    # map() draw the line on every facet.
    line_kws.update(color=color, linestyle=linestyle)
    if x is not None:
        self.map(plt.axvline, x=x, **line_kws)
    if y is not None:
        self.map(plt.axhline, y=y, **line_kws)
    return self
# ------ Properties that are part of the public API and documented by Sphinx
# NOTE(review): upstream exposes this as a read-only @property; the
# decorator is not visible in this chunk -- confirm before relying on it.
def axes(self):
    """An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
    return self._axes
def ax(self):
    """The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
    # Guard clause: a single-cell grid is the only case with "the" axes
    if self.axes.shape != (1, 1):
        err = (
            "Use the `.axes` attribute when facet variables are assigned."
        )
        raise AttributeError(err)
    return self.axes[0, 0]
# NOTE(review): upstream exposes this as a read-only @property; the
# decorator is not visible in this chunk -- confirm before relying on it.
def axes_dict(self):
    """A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
    If only one of ``row`` or ``col`` is assigned, each key is a string
    representing a level of that variable. If both facet dimensions are
    assigned, each key is a ``({row_level}, {col_level})`` tuple.
    """
    return self._axes_dict
# ------ Private properties, that require some computation to get
def _inner_axes(self):
    """Return a flat array of the inner axes."""
    if self._col_wrap is None:
        return self.axes[:-1, 1:].flat
    # With col_wrap, keep axes that are not in the first column and not in
    # the bottom row, treating cells above empty slots as bottom-row axes.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    inner = [
        ax
        for i, ax in enumerate(self.axes)
        if i % self._ncol
        and i < last_row_start
        and i < last_row_start - n_empty
    ]
    return np.array(inner, object).flat
def _left_axes(self):
    """Return a flat array of the left column of axes."""
    if self._col_wrap is None:
        return self.axes[:, 0].flat
    # With col_wrap, the left column is every axes at a multiple of ncol
    keep = [ax for i, ax in enumerate(self.axes) if not i % self._ncol]
    return np.array(keep, object).flat
def _not_left_axes(self):
    """Return a flat array of axes that aren't on the left column."""
    if self._col_wrap is None:
        return self.axes[:, 1:].flat
    # With col_wrap, anything not at a multiple of ncol is off the left edge
    keep = [ax for i, ax in enumerate(self.axes) if i % self._ncol]
    return np.array(keep, object).flat
def _bottom_axes(self):
    """Return a flat array of the bottom row of axes."""
    if self._col_wrap is None:
        return self.axes[-1, :].flat
    # With col_wrap, an axes is "bottom" if it sits in the last row, or if
    # the cell directly below it in the grid is empty.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    keep = [
        ax
        for i, ax in enumerate(self.axes)
        if i >= last_row_start or i >= last_row_start - n_empty
    ]
    return np.array(keep, object).flat
def _not_bottom_axes(self):
    """Return a flat array of axes that aren't on the bottom row."""
    if self._col_wrap is None:
        return self.axes[:-1, :].flat
    # Complement of _bottom_axes for the col_wrap case: axes above the last
    # row that also have a non-empty cell below them.
    n_empty = self._nrow * self._ncol - self._n_facets
    last_row_start = self._ncol * (self._nrow - 1)
    keep = [
        ax
        for i, ax in enumerate(self.axes)
        if i < last_row_start and i < last_row_start - n_empty
    ]
    return np.array(keep, object).flat
def relplot(
    data=None, *,
    x=None, y=None, hue=None, size=None, style=None, units=None,
    row=None, col=None, col_wrap=None, row_order=None, col_order=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    markers=None, dashes=None, style_order=None,
    legend="auto", kind="scatter", height=5, aspect=1, facet_kws=None,
    **kwargs
):
    """Figure-level interface for drawing relational plots onto a FacetGrid."""
    # Dispatch on the plot kind to the matching plotter class and function
    if kind == "scatter":
        plotter = _ScatterPlotter
        func = scatterplot
        markers = True if markers is None else markers
    elif kind == "line":
        plotter = _LinePlotter
        func = lineplot
        dashes = True if dashes is None else dashes
    else:
        err = f"Plot kind {kind} not recognized"
        raise ValueError(err)
    # Check for attempt to plot onto specific axes and warn
    if "ax" in kwargs:
        msg = (
            "relplot is a figure-level function and does not accept "
            "the `ax` parameter. You may wish to try {}".format(kind + "plot")
        )
        warnings.warn(msg, UserWarning)
        kwargs.pop("ax")
    # Use the full dataset to map the semantics
    # NOTE: get_semantics inspects locals(), so no extra locals may be
    # introduced before this call.
    p = plotter(
        data=data,
        variables=plotter.get_semantics(locals()),
        legend=legend,
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    p.map_size(sizes=sizes, order=size_order, norm=size_norm)
    p.map_style(markers=markers, dashes=dashes, order=style_order)
    # Extract the semantic mappings
    # (resolved once on the full dataset so all facets share them)
    if "hue" in p.variables:
        palette = p._hue_map.lookup_table
        hue_order = p._hue_map.levels
        hue_norm = p._hue_map.norm
    else:
        palette = hue_order = hue_norm = None
    if "size" in p.variables:
        sizes = p._size_map.lookup_table
        size_order = p._size_map.levels
        size_norm = p._size_map.norm
    if "style" in p.variables:
        style_order = p._style_map.levels
        if markers:
            markers = {k: p._style_map(k, "marker") for k in style_order}
        else:
            markers = None
        if dashes:
            dashes = {k: p._style_map(k, "dashes") for k in style_order}
        else:
            dashes = None
    else:
        markers = dashes = style_order = None
    # Now extract the data that would be used to draw a single plot
    variables = p.variables
    plot_data = p.plot_data
    plot_semantics = p.semantics
    # Define the common plotting parameters
    plot_kws = dict(
        palette=palette, hue_order=hue_order, hue_norm=hue_norm,
        sizes=sizes, size_order=size_order, size_norm=size_norm,
        markers=markers, dashes=dashes, style_order=style_order,
        legend=False,
    )
    plot_kws.update(kwargs)
    if kind == "scatter":
        plot_kws.pop("dashes")
    # Add the grid semantics onto the plotter
    grid_semantics = "row", "col"
    p.semantics = plot_semantics + grid_semantics
    p.assign_variables(
        data=data,
        variables=dict(
            x=x, y=y,
            hue=hue, size=size, style=style, units=units,
            row=row, col=col,
        ),
    )
    # Define the named variables for plotting on each facet
    # Rename the variables with a leading underscore to avoid
    # collisions with faceting variable names
    plot_variables = {v: f"_{v}" for v in variables}
    plot_kws.update(plot_variables)
    # Pass the row/col variables to FacetGrid with their original
    # names so that the axes titles render correctly
    for var in ["row", "col"]:
        # Handle faceting variables that lack name information
        if var in p.variables and p.variables[var] is None:
            p.variables[var] = f"_{var}_"
    grid_kws = {v: p.variables.get(v) for v in grid_semantics}
    # Rename the columns of the plot_data structure appropriately
    new_cols = plot_variables.copy()
    new_cols.update(grid_kws)
    full_data = p.plot_data.rename(columns=new_cols)
    # Set up the FacetGrid object
    facet_kws = {} if facet_kws is None else facet_kws.copy()
    g = FacetGrid(
        data=full_data.dropna(axis=1, how="all"),
        **grid_kws,
        col_wrap=col_wrap, row_order=row_order, col_order=col_order,
        height=height, aspect=aspect, dropna=False,
        **facet_kws
    )
    # Draw the plot
    g.map_dataframe(func, **plot_kws)
    # Label the axes, using the original variables
    # Pass "" when the variable name is None to overwrite internal variables
    g.set_axis_labels(variables.get("x") or "", variables.get("y") or "")
    # Show the legend
    if legend:
        # Replace the original plot data so the legend uses
        # numeric data with the correct type
        p.plot_data = plot_data
        p.add_legend_data(g.axes.flat[0])
        if p.legend_data:
            g.add_legend(legend_data=p.legend_data,
                         label_order=p.legend_order,
                         title=p.legend_title,
                         adjust_subtitles=True)
    # Rename the columns of the FacetGrid's `data` attribute
    # to match the original column names
    orig_cols = {
        f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
    }
    grid_data = g.data.rename(columns=orig_cols)
    if data is not None and (x is not None or y is not None):
        # Re-attach any user columns that were not part of the plot
        if not isinstance(data, pd.DataFrame):
            data = pd.DataFrame(data)
        g.data = pd.merge(
            data,
            grid_data[grid_data.columns.difference(data.columns)],
            left_index=True,
            right_index=True,
        )
    else:
        g.data = grid_data
    return g
171,858 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# NOTE(review): typeshed-style stub signature for urllib.request.urlopen.
# `Request`, `_string`, `ssl`, and `addinfourl` are presumably declared in the
# surrounding stub module — TODO confirm; the `...` defaults are stub markers.
def urlopen(
    url: Union[Request, _string],
    data: Optional[_string] = ...,
    timeout: Optional[float] = ...,
    cafile: Optional[_string] = ...,
    capath: Optional[_string] = ...,
    cadefault: bool = ...,
    context: Optional[ssl.SSLContext] = ...,
) -> addinfourl: ...
# NOTE(review): typeshed-style stub for io.BytesIO — declares signatures only;
# all bodies are intentionally elided with `...`.
class BytesIO(BufferedIOBase, BinaryIO):
    def __init__(self, initial_bytes: bytes = ...) -> None: ...
    # BytesIO does not contain a "name" field. This workaround is necessary
    # to allow BytesIO sub-classes to add this field, as it is defined
    # as a read-only property on IO[].
    name: Any
    def __enter__(self: _T) -> _T: ...
    def getvalue(self) -> bytes: ...
    def getbuffer(self) -> memoryview: ...
    # read1 gained a default for __size in Python 3.7; the stub branches on it.
    if sys.version_info >= (3, 7):
        def read1(self, __size: Optional[int] = ...) -> bytes: ...
    else:
        def read1(self, __size: Optional[int]) -> bytes: ... # type: ignore
The provided code snippet includes necessary dependencies for implementing the `dogplot` function. Write a Python function `def dogplot(*_, **__)` to solve the following problem:
Who's a good boy?
Here is the function:
def dogplot(*_, **__):
    """Who's a good boy?"""
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen  # Python 2 fallback
    from io import BytesIO
    # Pick one of the bundled dog pictures at random and render it
    # borderless on a square figure.
    url = "https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png"
    which = np.random.randint(2, 7)
    raw = urlopen(url.format(which)).read()
    image = plt.imread(BytesIO(raw))
    fig, axes = plt.subplots(figsize=(5, 5), dpi=100)
    fig.subplots_adjust(0, 0, 1, 1)
    axes.imshow(image)
    axes.set_axis_off()
171,859 | from numbers import Number
import numpy as np
import pandas as pd
from .algorithms import bootstrap
from .utils import _check_argument
The provided code snippet includes necessary dependencies for implementing the `_percentile_interval` function. Write a Python function `def _percentile_interval(data, width)` to solve the following problem:
Return a percentile interval from data of a given width.
Here is the function:
def _percentile_interval(data, width):
"""Return a percentile interval from data of a given width."""
edge = (100 - width) / 2
percentiles = edge, 100 - edge
return np.nanpercentile(data, percentiles) | Return a percentile interval from data of a given width. |
171,860 | from numbers import Number
import numpy as np
import pandas as pd
from .algorithms import bootstrap
from .utils import _check_argument
# NOTE(review): stub of numbers.Number, the abstract root of the numeric
# tower; only __hash__ is declared here.
class Number(metaclass=ABCMeta):
    def __hash__(self) -> int: ...
def _check_argument(param, options, value):
"""Raise if value for param is not in options."""
if value not in options:
raise ValueError(
f"`{param}` must be one of {options}, but {repr(value)} was passed."
)
The provided code snippet includes necessary dependencies for implementing the `_validate_errorbar_arg` function. Write a Python function `def _validate_errorbar_arg(arg)` to solve the following problem:
Check type and value of errorbar argument and assign default level.
Here is the function:
def _validate_errorbar_arg(arg):
"""Check type and value of errorbar argument and assign default level."""
DEFAULT_LEVELS = {
"ci": 95,
"pi": 95,
"se": 1,
"sd": 1,
}
usage = "`errorbar` must be a callable, string, or (string, number) tuple"
if arg is None:
return None, None
elif callable(arg):
return arg, None
elif isinstance(arg, str):
method = arg
level = DEFAULT_LEVELS.get(method, None)
else:
try:
method, level = arg
except (ValueError, TypeError) as err:
raise err.__class__(usage) from err
_check_argument("errorbar", list(DEFAULT_LEVELS), method)
if level is not None and not isinstance(level, Number):
raise TypeError(usage)
return method, level | Check type and value of errorbar argument and assign default level. |
171,861 | from __future__ import annotations
from itertools import product
from inspect import signature
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._oldcore import VectorPlotter, variable_type, categorical_order
from ._compat import share_axis
from . import utils
from .utils import (
adjust_legend_subtitles, _check_argument, _draw_figure, _disable_autolayout
)
from .palettes import color_palette, blend_palette
from ._docstrings import (
DocstringComponents,
_core_docs,
)
class PairGrid(Grid):
    """Subplot grid for plotting pairwise relationships in a dataset.
    This object maps each variable in a dataset onto a column and row in a
    grid of multiple axes. Different axes-level plotting functions can be
    used to draw bivariate plots in the upper and lower triangles, and the
    marginal distribution of each variable can be shown on the diagonal.
    Several different common plots can be generated in a single line using
    :func:`pairplot`. Use :class:`PairGrid` when you need more flexibility.
    See the :ref:`tutorial <grid_tutorial>` for more information.
    """
    def __init__(
        self, data, *, hue=None, vars=None, x_vars=None, y_vars=None,
        hue_order=None, palette=None, hue_kws=None, corner=False, diag_sharey=True,
        height=2.5, aspect=1, layout_pad=.5, despine=True, dropna=False,
    ):
        """Initialize the plot figure and PairGrid object.
        Parameters
        ----------
        data : DataFrame
            Tidy (long-form) dataframe where each column is a variable and
            each row is an observation.
        hue : string (variable name)
            Variable in ``data`` to map plot aspects to different colors. This
            variable will be excluded from the default x and y variables.
        vars : list of variable names
            Variables within ``data`` to use, otherwise use every column with
            a numeric datatype.
        {x, y}_vars : lists of variable names
            Variables within ``data`` to use separately for the rows and
            columns of the figure; i.e. to make a non-square plot.
        hue_order : list of strings
            Order for the levels of the hue variable in the palette
        palette : dict or seaborn color palette
            Set of colors for mapping the ``hue`` variable. If a dict, keys
            should be values in the ``hue`` variable.
        hue_kws : dictionary of param -> list of values mapping
            Other keyword arguments to insert into the plotting call to let
            other plot attributes vary across levels of the hue variable (e.g.
            the markers in a scatterplot).
        corner : bool
            If True, don't add axes to the upper (off-diagonal) triangle of the
            grid, making this a "corner" plot.
        height : scalar
            Height (in inches) of each facet.
        aspect : scalar
            Aspect * height gives the width (in inches) of each facet.
        layout_pad : scalar
            Padding between axes; passed to ``fig.tight_layout``.
        despine : boolean
            Remove the top and right spines from the plots.
        dropna : boolean
            Drop missing values from the data before plotting.
        See Also
        --------
        pairplot : Easily drawing common uses of :class:`PairGrid`.
        FacetGrid : Subplot grid for plotting conditional relationships.
        Examples
        --------
        .. include:: ../docstrings/PairGrid.rst
        """
        super().__init__()
        # Sort out the variables that define the grid
        numeric_cols = self._find_numeric_cols(data)
        if hue in numeric_cols:
            numeric_cols.remove(hue)
        if vars is not None:
            x_vars = list(vars)
            y_vars = list(vars)
        if x_vars is None:
            x_vars = numeric_cols
        if y_vars is None:
            y_vars = numeric_cols
        if np.isscalar(x_vars):
            x_vars = [x_vars]
        if np.isscalar(y_vars):
            y_vars = [y_vars]
        self.x_vars = x_vars = list(x_vars)
        self.y_vars = y_vars = list(y_vars)
        self.square_grid = self.x_vars == self.y_vars
        if not x_vars:
            raise ValueError("No variables found for grid columns.")
        if not y_vars:
            raise ValueError("No variables found for grid rows.")
        # Create the figure and the array of subplots
        # Overall figure size scales with the number of variables on each axis
        figsize = len(x_vars) * height * aspect, len(y_vars) * height
        with _disable_autolayout():
            fig = plt.figure(figsize=figsize)
        axes = fig.subplots(len(y_vars), len(x_vars),
                            sharex="col", sharey="row",
                            squeeze=False)
        # Possibly remove upper axes to make a corner grid
        # Note: setting up the axes is usually the most time-intensive part
        # of using the PairGrid. We are foregoing the speed improvement that
        # we would get by just not setting up the hidden axes so that we can
        # avoid implementing fig.subplots ourselves. But worth thinking about.
        self._corner = corner
        if corner:
            hide_indices = np.triu_indices_from(axes, 1)
            for i, j in zip(*hide_indices):
                axes[i, j].remove()
                axes[i, j] = None
        self._figure = fig
        self.axes = axes
        self.data = data
        # Save what we are going to do with the diagonal
        self.diag_sharey = diag_sharey
        self.diag_vars = None
        self.diag_axes = None
        self._dropna = dropna
        # Label the axes
        self._add_axis_labels()
        # Sort out the hue variable
        self._hue_var = hue
        if hue is None:
            self.hue_names = hue_order = ["_nolegend_"]
            self.hue_vals = pd.Series(["_nolegend_"] * len(data),
                                      index=data.index)
        else:
            # We need hue_order and hue_names because the former is used to control
            # the order of drawing and the latter is used to control the order of
            # the legend. hue_names can become string-typed while hue_order must
            # retain the type of the input data. This is messy but results from
            # the fact that PairGrid can implement the hue-mapping logic itself
            # (and was originally written exclusively that way) but now can delegate
            # to the axes-level functions, while always handling legend creation.
            # See GH2307
            hue_names = hue_order = categorical_order(data[hue], hue_order)
            if dropna:
                # Filter NA from the list of unique hue names
                hue_names = list(filter(pd.notnull, hue_names))
            self.hue_names = hue_names
            self.hue_vals = data[hue]
        # Additional dict of kwarg -> list of values for mapping the hue var
        self.hue_kws = hue_kws if hue_kws is not None else {}
        self._orig_palette = palette
        self._hue_order = hue_order
        self.palette = self._get_palette(data, hue, hue_order, palette)
        self._legend_data = {}
        # Make the plot look nice
        # Hide x tick labels everywhere except the bottom row ...
        for ax in axes[:-1, :].flat:
            if ax is None:
                continue
            for label in ax.get_xticklabels():
                label.set_visible(False)
            ax.xaxis.offsetText.set_visible(False)
            ax.xaxis.label.set_visible(False)
        # ... and y tick labels everywhere except the leftmost column
        for ax in axes[:, 1:].flat:
            if ax is None:
                continue
            for label in ax.get_yticklabels():
                label.set_visible(False)
            ax.yaxis.offsetText.set_visible(False)
            ax.yaxis.label.set_visible(False)
        self._tight_layout_rect = [.01, .01, .99, .99]
        self._tight_layout_pad = layout_pad
        self._despine = despine
        if despine:
            utils.despine(fig=fig)
        self.tight_layout(pad=layout_pad)
    def map(self, func, **kwargs):
        """Plot with the same function in every subplot.
        Parameters
        ----------
        func : callable plotting function
            Must take x, y arrays as positional arguments and draw onto the
            "currently active" matplotlib Axes. Also needs to accept kwargs
            called ``color`` and ``label``.
        """
        row_indices, col_indices = np.indices(self.axes.shape)
        indices = zip(row_indices.flat, col_indices.flat)
        self._map_bivariate(func, indices, **kwargs)
        return self
    def map_lower(self, func, **kwargs):
        """Plot with a bivariate function on the lower diagonal subplots.
        Parameters
        ----------
        func : callable plotting function
            Must take x, y arrays as positional arguments and draw onto the
            "currently active" matplotlib Axes. Also needs to accept kwargs
            called ``color`` and ``label``.
        """
        indices = zip(*np.tril_indices_from(self.axes, -1))
        self._map_bivariate(func, indices, **kwargs)
        return self
    def map_upper(self, func, **kwargs):
        """Plot with a bivariate function on the upper diagonal subplots.
        Parameters
        ----------
        func : callable plotting function
            Must take x, y arrays as positional arguments and draw onto the
            "currently active" matplotlib Axes. Also needs to accept kwargs
            called ``color`` and ``label``.
        """
        indices = zip(*np.triu_indices_from(self.axes, 1))
        self._map_bivariate(func, indices, **kwargs)
        return self
    def map_offdiag(self, func, **kwargs):
        """Plot with a bivariate function on the off-diagonal subplots.
        Parameters
        ----------
        func : callable plotting function
            Must take x, y arrays as positional arguments and draw onto the
            "currently active" matplotlib Axes. Also needs to accept kwargs
            called ``color`` and ``label``.
        """
        if self.square_grid:
            self.map_lower(func, **kwargs)
            if not self._corner:
                self.map_upper(func, **kwargs)
        else:
            # Non-square grid: enumerate every cell whose x and y variables differ
            indices = []
            for i, (y_var) in enumerate(self.y_vars):
                for j, (x_var) in enumerate(self.x_vars):
                    if x_var != y_var:
                        indices.append((i, j))
            self._map_bivariate(func, indices, **kwargs)
        return self
    def map_diag(self, func, **kwargs):
        """Plot with a univariate function on each diagonal subplot.
        Parameters
        ----------
        func : callable plotting function
            Must take an x array as a positional argument and draw onto the
            "currently active" matplotlib Axes. Also needs to accept kwargs
            called ``color`` and ``label``.
        """
        # Add special diagonal axes for the univariate plot
        if self.diag_axes is None:
            diag_vars = []
            diag_axes = []
            for i, y_var in enumerate(self.y_vars):
                for j, x_var in enumerate(self.x_vars):
                    if x_var == y_var:
                        # Make the density axes
                        diag_vars.append(x_var)
                        ax = self.axes[i, j]
                        # twinx overlays an axes sharing x so the diagonal
                        # plot gets its own independent y scale
                        diag_ax = ax.twinx()
                        diag_ax.set_axis_off()
                        diag_axes.append(diag_ax)
                        # Work around matplotlib bug
                        # https://github.com/matplotlib/matplotlib/issues/15188
                        if not plt.rcParams.get("ytick.left", True):
                            for tick in ax.yaxis.majorTicks:
                                tick.tick1line.set_visible(False)
                        # Remove main y axis from density axes in a corner plot
                        if self._corner:
                            ax.yaxis.set_visible(False)
                            if self._despine:
                                utils.despine(ax=ax, left=True)
                        # TODO add optional density ticks (on the right)
                        # when drawing a corner plot?
            if self.diag_sharey and diag_axes:
                for ax in diag_axes[1:]:
                    share_axis(diag_axes[0], ax, "y")
            self.diag_vars = np.array(diag_vars, np.object_)
            self.diag_axes = np.array(diag_axes, np.object_)
        # Functions without a `hue` parameter cannot map hue themselves;
        # iterate over hue levels manually instead
        if "hue" not in signature(func).parameters:
            return self._map_diag_iter_hue(func, **kwargs)
        # Loop over diagonal variables and axes, making one plot in each
        for var, ax in zip(self.diag_vars, self.diag_axes):
            plot_kwargs = kwargs.copy()
            # seaborn functions accept an ax= kwarg; other callables draw on
            # the "current" axes set via plt.sca
            if str(func.__module__).startswith("seaborn"):
                plot_kwargs["ax"] = ax
            else:
                plt.sca(ax)
            vector = self.data[var]
            if self._hue_var is not None:
                hue = self.data[self._hue_var]
            else:
                hue = None
            if self._dropna:
                not_na = vector.notna()
                if hue is not None:
                    not_na &= hue.notna()
                vector = vector[not_na]
                if hue is not None:
                    hue = hue[not_na]
            plot_kwargs.setdefault("hue", hue)
            plot_kwargs.setdefault("hue_order", self._hue_order)
            plot_kwargs.setdefault("palette", self._orig_palette)
            func(x=vector, **plot_kwargs)
            ax.legend_ = None
        self._add_axis_labels()
        return self
    def _map_diag_iter_hue(self, func, **kwargs):
        """Put marginal plot on each diagonal axes, iterating over hue."""
        # Plot on each of the diagonal axes
        fixed_color = kwargs.pop("color", None)
        for var, ax in zip(self.diag_vars, self.diag_axes):
            hue_grouped = self.data[var].groupby(self.hue_vals)
            plot_kwargs = kwargs.copy()
            if str(func.__module__).startswith("seaborn"):
                plot_kwargs["ax"] = ax
            else:
                plt.sca(ax)
            for k, label_k in enumerate(self._hue_order):
                # Attempt to get data for this level, allowing for empty
                try:
                    data_k = hue_grouped.get_group(label_k)
                except KeyError:
                    data_k = pd.Series([], dtype=float)
                if fixed_color is None:
                    color = self.palette[k]
                else:
                    color = fixed_color
                if self._dropna:
                    data_k = utils.remove_na(data_k)
                if str(func.__module__).startswith("seaborn"):
                    func(x=data_k, label=label_k, color=color, **plot_kwargs)
                else:
                    func(data_k, label=label_k, color=color, **plot_kwargs)
        self._add_axis_labels()
        return self
    def _map_bivariate(self, func, indices, **kwargs):
        """Draw a bivariate plot on the indicated axes."""
        # This is a hack to handle the fact that new distribution plots don't add
        # their artists onto the axes. This is probably superior in general, but
        # we'll need a better way to handle it in the axisgrid functions.
        from .distributions import histplot, kdeplot
        if func is histplot or func is kdeplot:
            self._extract_legend_handles = True
        kws = kwargs.copy()  # Use copy as we insert other kwargs
        for i, j in indices:
            x_var = self.x_vars[j]
            y_var = self.y_vars[i]
            ax = self.axes[i, j]
            if ax is None:  # i.e. we are in corner mode
                continue
            self._plot_bivariate(x_var, y_var, ax, func, **kws)
        self._add_axis_labels()
        if "hue" in signature(func).parameters:
            self.hue_names = list(self._legend_data)
    def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):
        """Draw a bivariate plot on the specified axes."""
        # Functions without a `hue` parameter get manual hue iteration
        if "hue" not in signature(func).parameters:
            self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)
            return
        kwargs = kwargs.copy()
        if str(func.__module__).startswith("seaborn"):
            kwargs["ax"] = ax
        else:
            plt.sca(ax)
        if x_var == y_var:
            axes_vars = [x_var]
        else:
            axes_vars = [x_var, y_var]
        if self._hue_var is not None and self._hue_var not in axes_vars:
            axes_vars.append(self._hue_var)
        data = self.data[axes_vars]
        if self._dropna:
            data = data.dropna()
        x = data[x_var]
        y = data[y_var]
        if self._hue_var is None:
            hue = None
        else:
            hue = data.get(self._hue_var)
        if "hue" not in kwargs:
            kwargs.update({
                "hue": hue, "hue_order": self._hue_order, "palette": self._orig_palette,
            })
        func(x=x, y=y, **kwargs)
        self._update_legend_data(ax)
    def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):
        """Draw a bivariate plot while iterating over hue subsets."""
        kwargs = kwargs.copy()
        if str(func.__module__).startswith("seaborn"):
            kwargs["ax"] = ax
        else:
            plt.sca(ax)
        if x_var == y_var:
            axes_vars = [x_var]
        else:
            axes_vars = [x_var, y_var]
        hue_grouped = self.data.groupby(self.hue_vals)
        for k, label_k in enumerate(self._hue_order):
            kws = kwargs.copy()
            # Attempt to get data for this level, allowing for empty
            try:
                data_k = hue_grouped.get_group(label_k)
            except KeyError:
                data_k = pd.DataFrame(columns=axes_vars,
                                      dtype=float)
            if self._dropna:
                data_k = data_k[axes_vars].dropna()
            x = data_k[x_var]
            y = data_k[y_var]
            for kw, val_list in self.hue_kws.items():
                kws[kw] = val_list[k]
            kws.setdefault("color", self.palette[k])
            if self._hue_var is not None:
                kws["label"] = label_k
            if str(func.__module__).startswith("seaborn"):
                func(x=x, y=y, **kws)
            else:
                func(x, y, **kws)
        self._update_legend_data(ax)
    def _add_axis_labels(self):
        """Add labels to the left and bottom Axes."""
        for ax, label in zip(self.axes[-1, :], self.x_vars):
            ax.set_xlabel(label)
        for ax, label in zip(self.axes[:, 0], self.y_vars):
            ax.set_ylabel(label)
    def _find_numeric_cols(self, data):
        """Find which variables in a DataFrame are numeric."""
        numeric_cols = []
        for col in data:
            if variable_type(data[col]) == "numeric":
                numeric_cols.append(col)
        return numeric_cols
def histplot(
    data=None, *,
    # Vector variables
    x=None, y=None, hue=None, weights=None,
    # Histogram computation parameters
    stat="count", bins="auto", binwidth=None, binrange=None,
    discrete=None, cumulative=False, common_bins=True, common_norm=True,
    # Histogram appearance parameters
    multiple="layer", element="bars", fill=True, shrink=1,
    # Histogram smoothing with a kernel density estimate
    kde=False, kde_kws=None, line_kws=None,
    # Bivariate histogram parameters
    thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,
    # Hue mapping parameters
    palette=None, hue_order=None, hue_norm=None, color=None,
    # Axes information
    log_scale=None, legend=True, ax=None,
    # Other appearance keywords
    **kwargs,
):
    # NOTE: get_semantics inspects locals(), so the parameter names in the
    # signature above are part of the data flow — do not rename them or
    # introduce new locals before this call.
    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals())
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    if ax is None:
        ax = plt.gca()
    p._attach(ax, log_scale=log_scale)
    if p.univariate:  # Note, bivariate plots won't cycle
        if fill:
            # The drawing method determines which property cycle / default
            # color resolution applies
            method = ax.bar if element == "bars" else ax.fill_between
        else:
            method = ax.plot
        color = _default_color(method, hue, color, kwargs)
    # Nothing to draw without assigned x/y data; return the (empty) axes
    if not p.has_xy_data:
        return ax
    # Default to discrete bins for categorical variables
    if discrete is None:
        discrete = p._default_discrete()
    estimate_kws = dict(
        stat=stat,
        bins=bins,
        binwidth=binwidth,
        binrange=binrange,
        discrete=discrete,
        cumulative=cumulative,
    )
    if p.univariate:
        p.plot_univariate_histogram(
            multiple=multiple,
            element=element,
            fill=fill,
            shrink=shrink,
            common_norm=common_norm,
            common_bins=common_bins,
            kde=kde,
            kde_kws=kde_kws,
            color=color,
            legend=legend,
            estimate_kws=estimate_kws,
            line_kws=line_kws,
            **kwargs,
        )
    else:
        p.plot_bivariate_histogram(
            common_bins=common_bins,
            common_norm=common_norm,
            thresh=thresh,
            pthresh=pthresh,
            pmax=pmax,
            color=color,
            legend=legend,
            cbar=cbar,
            cbar_ax=cbar_ax,
            cbar_kws=cbar_kws,
            estimate_kws=estimate_kws,
            **kwargs,
        )
    return ax
# Attach the full user-facing documentation, interpolating the shared
# docstring components defined at module level.
histplot.__doc__ = """\
Plot univariate or bivariate histograms to show distributions of datasets.
A histogram is a classic visualization tool that represents the distribution
of one or more variables by counting the number of observations that fall within
discrete bins.
This function can normalize the statistic computed within each bin to estimate
frequency, density or probability mass, and it can add a smooth curve obtained
using a kernel density estimate, similar to :func:`kdeplot`.
More information is provided in the :ref:`user guide <tutorial_hist>`.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
    If provided, weight the contribution of the corresponding data points
    towards the count in each bin by these factors.
{params.hist.stat}
{params.hist.bins}
{params.hist.binwidth}
{params.hist.binrange}
discrete : bool
    If True, default to ``binwidth=1`` and draw the bars so that they are
    centered on their corresponding data points. This avoids "gaps" that may
    otherwise appear when using discrete (integer) data.
cumulative : bool
    If True, plot the cumulative counts as bins increase.
common_bins : bool
    If True, use the same bins when semantic variables produce multiple
    plots. If using a reference rule to determine the bins, it will be computed
    with the full dataset.
common_norm : bool
    If True and using a normalized statistic, the normalization will apply over
    the full dataset. Otherwise, normalize each histogram independently.
multiple : {{"layer", "dodge", "stack", "fill"}}
    Approach to resolving multiple elements when semantic mapping creates subsets.
    Only relevant with univariate data.
element : {{"bars", "step", "poly"}}
    Visual representation of the histogram statistic.
    Only relevant with univariate data.
fill : bool
    If True, fill in the space under the histogram.
    Only relevant with univariate data.
shrink : number
    Scale the width of each bar relative to the binwidth by this factor.
    Only relevant with univariate data.
kde : bool
    If True, compute a kernel density estimate to smooth the distribution
    and show on the plot as (one or more) line(s).
    Only relevant with univariate data.
kde_kws : dict
    Parameters that control the KDE computation, as in :func:`kdeplot`.
line_kws : dict
    Parameters that control the KDE visualization, passed to
    :meth:`matplotlib.axes.Axes.plot`.
thresh : number or None
    Cells with a statistic less than or equal to this value will be transparent.
    Only relevant with bivariate data.
pthresh : number or None
    Like ``thresh``, but a value in [0, 1] such that cells with aggregate counts
    (or other statistics, when used) up to this proportion of the total will be
    transparent.
pmax : number or None
    A value in [0, 1] that sets that saturation point for the colormap at a value
    such that cells below constitute this proportion of the total count (or
    other statistic, when used).
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
    Other keyword arguments are passed to one of the following matplotlib
    functions:
    - :meth:`matplotlib.axes.Axes.bar` (univariate, element="bars")
    - :meth:`matplotlib.axes.Axes.fill_between` (univariate, other element, fill=True)
    - :meth:`matplotlib.axes.Axes.plot` (univariate, other element, fill=False)
    - :meth:`matplotlib.axes.Axes.pcolormesh` (bivariate)
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.kdeplot}
{seealso.rugplot}
{seealso.ecdfplot}
{seealso.jointplot}
Notes
-----
The choice of bins for computing and plotting a histogram can exert
substantial influence on the insights that one is able to draw from the
visualization. If the bins are too large, they may erase important features.
On the other hand, bins that are too small may be dominated by random
variability, obscuring the shape of the true underlying distribution. The
default bin size is determined using a reference rule that depends on the
sample size and variance. This works well in many cases, (i.e., with
"well-behaved" data) but it fails in others. It is always a good idea to try
different bin sizes to be sure that you are not missing something important.
This function allows you to specify bins in several different ways, such as
by setting the total number of bins to use, the width of each bin, or the
specific locations where the bins should break.
Examples
--------
.. include:: ../docstrings/histplot.rst
""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def kdeplot(
    data=None, *, x=None, y=None, hue=None, weights=None,
    palette=None, hue_order=None, hue_norm=None, color=None, fill=None,
    multiple="layer", common_norm=True, common_grid=False, cumulative=False,
    bw_method="scott", bw_adjust=1, warn_singular=True, log_scale=None,
    levels=10, thresh=.05, gridsize=200, cut=3, clip=None,
    legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,
    **kwargs,
):
    """Plot univariate or bivariate distributions using kernel density estimation."""
    # --- Start with backwards compatibility for versions < 0.11.0 ----------------
    # Handle (past) deprecation of `data2`
    if "data2" in kwargs:
        msg = "`data2` has been removed (replaced by `y`); please update your code."
        # BUGFIX: the exception was previously instantiated but never raised,
        # silently ignoring the removed parameter.
        raise TypeError(msg)
    # Handle deprecation of `vertical`
    vertical = kwargs.pop("vertical", None)
    if vertical is not None:
        if vertical:
            action_taken = "assigning data to `y`."
            if x is None:
                data, y = y, data
            else:
                x, y = y, x
        else:
            action_taken = "assigning data to `x`."
        msg = textwrap.dedent(f"""\n
        The `vertical` parameter is deprecated; {action_taken}
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)
    # Handle deprecation of `bw`
    bw = kwargs.pop("bw", None)
    if bw is not None:
        msg = textwrap.dedent(f"""\n
        The `bw` parameter is deprecated in favor of `bw_method` and `bw_adjust`.
        Setting `bw_method={bw}`, but please see the docs for the new parameters
        and update your code. This will become an error in seaborn v0.13.0.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)
        bw_method = bw
    # Handle deprecation of `kernel`
    if kwargs.pop("kernel", None) is not None:
        msg = textwrap.dedent("""\n
        Support for alternate kernels has been removed; using Gaussian kernel.
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)
    # Handle deprecation of shade_lowest
    shade_lowest = kwargs.pop("shade_lowest", None)
    if shade_lowest is not None:
        if shade_lowest:
            thresh = 0
        # BUGFIX: closed the unbalanced backtick around `thresh={thresh}` in
        # the warning message.
        msg = textwrap.dedent(f"""\n
        `shade_lowest` has been replaced by `thresh`; setting `thresh={thresh}`.
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)
    # Handle "soft" deprecation of shade `shade` is not really the right
    # terminology here, but unlike some of the other deprecated parameters it
    # is probably very commonly used and much hard to remove. This is therefore
    # going to be a longer process where, first, `fill` will be introduced and
    # be used throughout the documentation. In 0.12, when kwarg-only
    # enforcement hits, we can remove the shade/shade_lowest out of the
    # function signature all together and pull them out of the kwargs. Then we
    # can actually fire a FutureWarning, and eventually remove.
    shade = kwargs.pop("shade", None)
    if shade is not None:
        fill = shade
        msg = textwrap.dedent(f"""\n
        `shade` is now deprecated in favor of `fill`; setting `fill={shade}`.
        This will become an error in seaborn v0.14.0; please update your code.
        """)
        warnings.warn(msg, FutureWarning, stacklevel=2)
    # Handle `n_levels`
    # This was never in the formal API but it was processed, and appeared in an
    # example. We can treat as an alias for `levels` now and deprecate later.
    levels = kwargs.pop("n_levels", levels)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # NOTE: get_semantics inspects locals(), so the parameter names in the
    # signature are part of the data flow.
    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals()),
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    if ax is None:
        ax = plt.gca()
    p._attach(ax, allowed_types=["numeric", "datetime"], log_scale=log_scale)
    method = ax.fill_between if fill else ax.plot
    color = _default_color(method, hue, color, kwargs)
    # Nothing to draw without assigned x/y data; return the (empty) axes
    if not p.has_xy_data:
        return ax
    # Pack the kwargs for statistics.KDE
    estimate_kws = dict(
        bw_method=bw_method,
        bw_adjust=bw_adjust,
        gridsize=gridsize,
        cut=cut,
        clip=clip,
        cumulative=cumulative,
    )
    if p.univariate:
        plot_kws = kwargs.copy()
        p.plot_univariate_density(
            multiple=multiple,
            common_norm=common_norm,
            common_grid=common_grid,
            fill=fill,
            color=color,
            legend=legend,
            warn_singular=warn_singular,
            estimate_kws=estimate_kws,
            **plot_kws,
        )
    else:
        p.plot_bivariate_density(
            common_norm=common_norm,
            fill=fill,
            levels=levels,
            thresh=thresh,
            legend=legend,
            color=color,
            warn_singular=warn_singular,
            cbar=cbar,
            cbar_ax=cbar_ax,
            cbar_kws=cbar_kws,
            estimate_kws=estimate_kws,
            **kwargs,
        )
    return ax
# Attach the full user-facing documentation, interpolating the shared
# docstring components defined at module level.
kdeplot.__doc__ = """\
Plot univariate or bivariate distributions using kernel density estimation.
A kernel density estimate (KDE) plot is a method for visualizing the
distribution of observations in a dataset, analogous to a histogram. KDE
represents the data using a continuous probability density curve in one or
more dimensions.
The approach is explained further in the :ref:`user guide <tutorial_kde>`.
Relative to a histogram, KDE can produce a plot that is less cluttered and
more interpretable, especially when drawing multiple distributions. But it
has the potential to introduce distortions if the underlying distribution is
bounded or not smooth. Like a histogram, the quality of the representation
also depends on the selection of good smoothing parameters.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
    If provided, weight the kernel density estimation using these values.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
fill : bool or None
    If True, fill in the area under univariate density curves or between
    bivariate contours. If None, the default depends on ``multiple``.
{params.dist.multiple}
common_norm : bool
    If True, scale each conditional density by the number of observations
    such that the total area under all densities sums to 1. Otherwise,
    normalize each density independently.
common_grid : bool
    If True, use the same evaluation grid for each kernel density estimate.
    Only relevant with univariate data.
{params.kde.cumulative}
{params.kde.bw_method}
{params.kde.bw_adjust}
warn_singular : bool
    If True, issue a warning when trying to estimate the density of data
    with zero variance.
{params.dist.log_scale}
levels : int or vector
    Number of contour levels or values to draw contours at. A vector argument
    must have increasing values in [0, 1]. Levels correspond to iso-proportions
    of the density: e.g., 20% of the probability mass will lie below the
    contour drawn for 0.2. Only relevant with bivariate data.
thresh : number in [0, 1]
    Lowest iso-proportion level at which to draw a contour line. Ignored when
    ``levels`` is a vector. Only relevant with bivariate data.
gridsize : int
    Number of points on each dimension of the evaluation grid.
{params.kde.cut}
{params.kde.clip}
{params.dist.legend}
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.ax}
kwargs
    Other keyword arguments are passed to one of the following matplotlib
    functions:
    - :meth:`matplotlib.axes.Axes.plot` (univariate, ``fill=False``),
    - :meth:`matplotlib.axes.Axes.fill_between` (univariate, ``fill=True``),
    - :meth:`matplotlib.axes.Axes.contour` (bivariate, ``fill=False``),
    - :meth:`matplotlib.axes.Axes.contourf` (bivariate, ``fill=True``).
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.ecdfplot}
{seealso.jointplot}
{seealso.violinplot}
Notes
-----
The *bandwidth*, or standard deviation of the smoothing kernel, is an
important parameter. Misspecification of the bandwidth can produce a
distorted representation of the data. Much like the choice of bin width in a
histogram, an over-smoothed curve can erase true features of a
distribution, while an under-smoothed curve can create false features out of
random variability. The rule-of-thumb that sets the default bandwidth works
best when the true distribution is smooth, unimodal, and roughly bell-shaped.
It is always a good idea to check the default behavior by using ``bw_adjust``
to increase or decrease the amount of smoothing.
Because the smoothing algorithm uses a Gaussian kernel, the estimated density
curve can extend to values that do not make sense for a particular dataset.
For example, the curve may be drawn over negative values when smoothing data
that are naturally positive. The ``cut`` and ``clip`` parameters can be used
to control the extent of the curve, but datasets that have many observations
close to a natural boundary may be better served by a different visualization
method.
Similar considerations apply when a dataset is naturally discrete or "spiky"
(containing many repeated observations of the same value). Kernel density
estimation will always produce a smooth curve, which would be misleading
in these situations.
The units on the density axis are a common source of confusion. While kernel
density estimation produces a probability distribution, the height of the curve
at each point gives a density, not a probability. A probability can be obtained
only by integrating the density across a range. The curve is normalized so
that the integral over all possible values is 1, meaning that the scale of
the density axis depends on the data values.
Examples
--------
.. include:: ../docstrings/kdeplot.rst
""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def scatterplot(
    data=None, *,
    x=None, y=None, hue=None, size=None, style=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    markers=True, style_order=None, legend="auto", ax=None,
    **kwargs
):
    """Draw a scatter plot with optional hue/size/style semantic groupings.

    The public docstring is assembled from shared templates and assigned to
    ``scatterplot.__doc__`` right after this definition.
    """
    # ``locals()`` must be captured before any new local names are bound so
    # that only the call parameters are interpreted as plot semantics.
    plotter = _ScatterPlotter(
        data=data,
        variables=_ScatterPlotter.get_semantics(locals()),
        legend=legend,
    )

    plotter.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    plotter.map_size(sizes=sizes, order=size_order, norm=size_norm)
    plotter.map_style(markers=markers, order=style_order)

    # Fall back to the currently-active Axes when none is supplied
    if ax is None:
        ax = plt.gca()

    # Nothing to draw without x/y data; still return a usable Axes
    if not plotter.has_xy_data:
        return ax

    plotter._attach(ax)

    # Other functions have color as an explicit param,
    # and we should probably do that here too
    kwargs["color"] = _default_color(
        ax.scatter, hue, kwargs.pop("color", None), kwargs
    )

    plotter.plot(ax, kwargs)
    return ax
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in `data`
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in `data`
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def regplot(
    data=None, *, x=None, y=None,
    x_estimator=None, x_bins=None, x_ci="ci",
    scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
    seed=None, order=1, logistic=False, lowess=False, robust=False,
    logx=False, x_partial=None, y_partial=None,
    truncate=True, dropna=True, x_jitter=None, y_jitter=None,
    label=None, color=None, marker="o",
    scatter_kws=None, line_kws=None, ax=None
):
    """Plot data and a linear regression model fit on a single Axes.

    The public docstring is assembled from shared templates and assigned to
    ``regplot.__doc__`` right after this definition.
    """
    p = _RegressionPlotter(
        x, y, data, x_estimator, x_bins, x_ci,
        scatter, fit_reg, ci, n_boot, units, seed,
        order, logistic, lowess, robust, logx,
        x_partial, y_partial, truncate, dropna,
        x_jitter, y_jitter, color, label,
    )

    # Fall back to the currently-active Axes when none is supplied
    if ax is None:
        ax = plt.gca()

    # Shallow-copy the kwarg dicts so the caller's inputs are never mutated
    scatter_kws = copy.copy(scatter_kws) if scatter_kws is not None else {}
    scatter_kws["marker"] = marker
    line_kws = copy.copy(line_kws) if line_kws is not None else {}

    p.plot(ax, scatter_kws, line_kws)
    return ax
# Assemble the public docstring from the shared regression doc templates.
# Fix: the Examples directive previously read ``.. include:`` (one colon),
# which is not a valid reST directive, so the examples were silently dropped
# from the built documentation.
regplot.__doc__ = dedent("""\
    Plot data and a linear regression model fit.
    {model_api}
    Parameters
    ----------
    x, y: string, series, or vector array
        Input variables. If strings, these should correspond with column names
        in ``data``. When pandas objects are used, axes will be labeled with
        the series name.
    {data}
    {x_estimator}
    {x_bins}
    {x_ci}
    {scatter}
    {fit_reg}
    {ci}
    {n_boot}
    {units}
    {seed}
    {order}
    {logistic}
    {lowess}
    {robust}
    {logx}
    {xy_partial}
    {truncate}
    {xy_jitter}
    label : string
        Label to apply to either the scatterplot or regression line (if
        ``scatter`` is ``False``) for use in a legend.
    color : matplotlib color
        Color to apply to all plot elements; will be superseded by colors
        passed in ``scatter_kws`` or ``line_kws``.
    marker : matplotlib marker code
        Marker to use for the scatterplot glyphs.
    {scatter_line_kws}
    ax : matplotlib Axes, optional
        Axes object to draw the plot onto, otherwise uses the current Axes.
    Returns
    -------
    ax : matplotlib Axes
        The Axes object containing the plot.
    See Also
    --------
    lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
             linear relationships in a dataset.
    jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
                ``kind="reg"``).
    pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
               ``kind="reg"``).
    residplot : Plot the residuals of a linear regression model.
    Notes
    -----
    {regplot_vs_lmplot}
    It's also easy to combine :func:`regplot` and :class:`JointGrid` or
    :class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
    functions, although these do not directly accept all of :func:`regplot`'s
    parameters.
    Examples
    --------
    .. include:: ../docstrings/regplot.rst
    """).format(**_regression_docs)
The provided code snippet includes necessary dependencies for implementing the `pairplot` function. Write a Python function `def pairplot( data, *, hue=None, hue_order=None, palette=None, vars=None, x_vars=None, y_vars=None, kind="scatter", diag_kind="auto", markers=None, height=2.5, aspect=1, corner=False, dropna=False, plot_kws=None, diag_kws=None, grid_kws=None, size=None, )` to solve the following problem:
Plot pairwise relationships in a dataset. By default, this function will create a grid of Axes such that each numeric variable in ``data`` will by shared across the y-axes across a single row and the x-axes across a single column. The diagonal plots are treated differently: a univariate distribution plot is drawn to show the marginal distribution of the data in each column. It is also possible to show a subset of variables or plot different variables on the rows and columns. This is a high-level interface for :class:`PairGrid` that is intended to make it easy to draw a few common styles. You should use :class:`PairGrid` directly if you need more flexibility. Parameters ---------- data : `pandas.DataFrame` Tidy (long-form) dataframe where each column is a variable and each row is an observation. hue : name of variable in ``data`` Variable in ``data`` to map plot aspects to different colors. hue_order : list of strings Order for the levels of the hue variable in the palette palette : dict or seaborn color palette Set of colors for mapping the ``hue`` variable. If a dict, keys should be values in the ``hue`` variable. vars : list of variable names Variables within ``data`` to use, otherwise use every column with a numeric datatype. {x, y}_vars : lists of variable names Variables within ``data`` to use separately for the rows and columns of the figure; i.e. to make a non-square plot. kind : {'scatter', 'kde', 'hist', 'reg'} Kind of plot to make. diag_kind : {'auto', 'hist', 'kde', None} Kind of plot for the diagonal subplots. If 'auto', choose based on whether or not ``hue`` is used. markers : single matplotlib marker code or list Either the marker to use for all scatterplot points or a list of markers with a length the same as the number of levels in the hue variable so that differently colored points will also have different scatterplot markers. height : scalar Height (in inches) of each facet. 
aspect : scalar Aspect * height gives the width (in inches) of each facet. corner : bool If True, don't add axes to the upper (off-diagonal) triangle of the grid, making this a "corner" plot. dropna : boolean Drop missing values from the data before plotting. {plot, diag, grid}_kws : dicts Dictionaries of keyword arguments. ``plot_kws`` are passed to the bivariate plotting function, ``diag_kws`` are passed to the univariate plotting function, and ``grid_kws`` are passed to the :class:`PairGrid` constructor. Returns ------- grid : :class:`PairGrid` Returns the underlying :class:`PairGrid` instance for further tweaking. See Also -------- PairGrid : Subplot grid for more flexible plotting of pairwise relationships. JointGrid : Grid for plotting joint and marginal distributions of two variables. Examples -------- .. include:: ../docstrings/pairplot.rst
Here is the function:
def pairplot(
    data, *,
    hue=None, hue_order=None, palette=None,
    vars=None, x_vars=None, y_vars=None,
    kind="scatter", diag_kind="auto", markers=None,
    height=2.5, aspect=1, corner=False, dropna=False,
    plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
    """Plot pairwise relationships in a dataset.

    By default, this function will create a grid of Axes such that each numeric
    variable in ``data`` will be shared across the y-axes across a single row and
    the x-axes across a single column. The diagonal plots are treated
    differently: a univariate distribution plot is drawn to show the marginal
    distribution of the data in each column.

    It is also possible to show a subset of variables or plot different
    variables on the rows and columns.

    This is a high-level interface for :class:`PairGrid` that is intended to
    make it easy to draw a few common styles. You should use :class:`PairGrid`
    directly if you need more flexibility.

    Parameters
    ----------
    data : `pandas.DataFrame`
        Tidy (long-form) dataframe where each column is a variable and
        each row is an observation.
    hue : name of variable in ``data``
        Variable in ``data`` to map plot aspects to different colors.
    hue_order : list of strings
        Order for the levels of the hue variable in the palette
    palette : dict or seaborn color palette
        Set of colors for mapping the ``hue`` variable. If a dict, keys
        should be values in the ``hue`` variable.
    vars : list of variable names
        Variables within ``data`` to use, otherwise use every column with
        a numeric datatype.
    {x, y}_vars : lists of variable names
        Variables within ``data`` to use separately for the rows and
        columns of the figure; i.e. to make a non-square plot.
    kind : {'scatter', 'kde', 'hist', 'reg'}
        Kind of plot to make.
    diag_kind : {'auto', 'hist', 'kde', None}
        Kind of plot for the diagonal subplots. If 'auto', choose based on
        whether or not ``hue`` is used.
    markers : single matplotlib marker code or list
        Either the marker to use for all scatterplot points or a list of markers
        with a length the same as the number of levels in the hue variable so that
        differently colored points will also have different scatterplot
        markers.
    height : scalar
        Height (in inches) of each facet.
    aspect : scalar
        Aspect * height gives the width (in inches) of each facet.
    corner : bool
        If True, don't add axes to the upper (off-diagonal) triangle of the
        grid, making this a "corner" plot.
    dropna : boolean
        Drop missing values from the data before plotting.
    {plot, diag, grid}_kws : dicts
        Dictionaries of keyword arguments. ``plot_kws`` are passed to the
        bivariate plotting function, ``diag_kws`` are passed to the univariate
        plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
        constructor.

    Returns
    -------
    grid : :class:`PairGrid`
        Returns the underlying :class:`PairGrid` instance for further tweaking.

    See Also
    --------
    PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
    JointGrid : Grid for plotting joint and marginal distributions of two variables.

    Examples
    --------
    .. include:: ../docstrings/pairplot.rst

    """
    # Avoid circular import
    from .distributions import histplot, kdeplot

    # Handle deprecations: `size` was renamed to `height`
    if size is not None:
        height = size
        msg = ("The `size` parameter has been renamed to `height`; "
               "please update your code.")
        warnings.warn(msg, UserWarning)

    if not isinstance(data, pd.DataFrame):
        raise TypeError(
            f"'data' must be pandas DataFrame object, not: {type(data)}")

    # Shallow-copy the kwarg dicts so the caller's inputs are never mutated
    plot_kws = {} if plot_kws is None else plot_kws.copy()
    diag_kws = {} if diag_kws is None else diag_kws.copy()
    grid_kws = {} if grid_kws is None else grid_kws.copy()

    # Resolve "auto" diag kind
    if diag_kind == "auto":
        if hue is None:
            diag_kind = "kde" if kind == "kde" else "hist"
        else:
            diag_kind = "hist" if kind == "hist" else "kde"

    # Set up the PairGrid
    grid_kws.setdefault("diag_sharey", diag_kind == "hist")
    grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
                    hue_order=hue_order, palette=palette, corner=corner,
                    height=height, aspect=aspect, dropna=dropna, **grid_kws)

    # Add the markers here as PairGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if markers is not None:
        if kind == "reg":
            # Needed until regplot supports style
            if grid.hue_names is None:
                n_markers = 1
            else:
                n_markers = len(grid.hue_names)
            if not isinstance(markers, list):
                markers = [markers] * n_markers
            if len(markers) != n_markers:
                raise ValueError("markers must be a singleton or a list of "
                                 "markers for each level of the hue variable")
            grid.hue_kws = {"marker": markers}
        elif kind == "scatter":
            if isinstance(markers, str):
                plot_kws["marker"] = markers
            elif hue is not None:
                # Delegate per-hue markers to scatterplot's style machinery
                plot_kws["style"] = data[hue]
                plot_kws["markers"] = markers

    # Draw the marginal plots on the diagonal
    # NOTE(review): diag_kws was already copied above; this re-copy is harmless
    diag_kws = diag_kws.copy()
    diag_kws.setdefault("legend", False)
    if diag_kind == "hist":
        grid.map_diag(histplot, **diag_kws)
    elif diag_kind == "kde":
        diag_kws.setdefault("fill", True)
        diag_kws.setdefault("warn_singular", False)
        grid.map_diag(kdeplot, **diag_kws)

    # Maybe plot on the off-diagonals
    if diag_kind is not None:
        plotter = grid.map_offdiag
    else:
        plotter = grid.map

    # NOTE(review): an unrecognized `kind` falls through silently and draws
    # nothing off-diagonal — consider validating with `_check_argument`
    if kind == "scatter":
        from .relational import scatterplot  # Avoid circular import
        plotter(scatterplot, **plot_kws)
    elif kind == "reg":
        from .regression import regplot  # Avoid circular import
        plotter(regplot, **plot_kws)
    elif kind == "kde":
        from .distributions import kdeplot  # Avoid circular import
        plot_kws.setdefault("warn_singular", False)
        plotter(kdeplot, **plot_kws)
    elif kind == "hist":
        from .distributions import histplot  # Avoid circular import
        plotter(histplot, **plot_kws)

    # Add a legend
    if hue is not None:
        grid.add_legend()

    grid.tight_layout()

    return grid
aspect : scalar Aspect * height gives the width (in inches) of each facet. corner : bool If True, don't add axes to the upper (off-diagonal) triangle of the grid, making this a "corner" plot. dropna : boolean Drop missing values from the data before plotting. {plot, diag, grid}_kws : dicts Dictionaries of keyword arguments. ``plot_kws`` are passed to the bivariate plotting function, ``diag_kws`` are passed to the univariate plotting function, and ``grid_kws`` are passed to the :class:`PairGrid` constructor. Returns ------- grid : :class:`PairGrid` Returns the underlying :class:`PairGrid` instance for further tweaking. See Also -------- PairGrid : Subplot grid for more flexible plotting of pairwise relationships. JointGrid : Grid for plotting joint and marginal distributions of two variables. Examples -------- .. include:: ../docstrings/pairplot.rst |
171,862 | from __future__ import annotations
from itertools import product
from inspect import signature
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._oldcore import VectorPlotter, variable_type, categorical_order
from ._compat import share_axis
from . import utils
from .utils import (
adjust_legend_subtitles, _check_argument, _draw_figure, _disable_autolayout
)
from .palettes import color_palette, blend_palette
from ._docstrings import (
DocstringComponents,
_core_docs,
)
class JointGrid(_BaseGrid):
    """Grid for drawing a bivariate plot with marginal univariate plots.

    Many plots can be drawn by using the figure-level interface :func:`jointplot`.
    Use this class directly when you need more flexibility.
    """

    def __init__(
        self, data=None, *,
        x=None, y=None, hue=None,
        height=6, ratio=5, space=.2,
        palette=None, hue_order=None, hue_norm=None,
        dropna=False, xlim=None, ylim=None, marginal_ticks=False,
    ):
        # Set up the subplot grid: one large joint axes plus two thin
        # marginal axes that share the joint axes' x/y scales.
        f = plt.figure(figsize=(height, height))
        gs = plt.GridSpec(ratio + 1, ratio + 1)

        ax_joint = f.add_subplot(gs[1:, :-1])
        ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
        ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)

        self._figure = f
        self.ax_joint = ax_joint
        self.ax_marg_x = ax_marg_x
        self.ax_marg_y = ax_marg_y

        # Turn off tick visibility for the measure axis on the marginal plots
        plt.setp(ax_marg_x.get_xticklabels(), visible=False)
        plt.setp(ax_marg_y.get_yticklabels(), visible=False)
        plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)
        plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)

        # Turn off the ticks on the density axis for the marginal plots
        if not marginal_ticks:
            plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
            plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
            plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
            plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
            plt.setp(ax_marg_x.get_yticklabels(), visible=False)
            plt.setp(ax_marg_y.get_xticklabels(), visible=False)
            plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)
            plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)
            ax_marg_x.yaxis.grid(False)
            ax_marg_y.xaxis.grid(False)

        # Process the input variables through the common plotter machinery
        p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))
        plot_data = p.plot_data.loc[:, p.plot_data.notna().any()]

        # Possibly drop NA
        if dropna:
            plot_data = plot_data.dropna()

        def get_var(var):
            # Extract one assigned variable as a named Series (or None)
            vector = plot_data.get(var, None)
            if vector is not None:
                vector = vector.rename(p.variables.get(var, None))
            return vector

        self.x = get_var("x")
        self.y = get_var("y")
        self.hue = get_var("hue")

        for axis in "xy":
            name = p.variables.get(axis, None)
            if name is not None:
                getattr(ax_joint, f"set_{axis}label")(name)

        if xlim is not None:
            ax_joint.set_xlim(xlim)
        if ylim is not None:
            ax_joint.set_ylim(ylim)

        # Store the semantic mapping parameters for axes-level functions
        self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)

        # Make the grid look nice
        utils.despine(f)
        if not marginal_ticks:
            utils.despine(ax=ax_marg_x, left=True)
            utils.despine(ax=ax_marg_y, bottom=True)
        for axes in [ax_marg_x, ax_marg_y]:
            for axis in [axes.xaxis, axes.yaxis]:
                axis.label.set_visible(False)
        f.tight_layout()
        f.subplots_adjust(hspace=space, wspace=space)

    def _inject_kwargs(self, func, kws, params):
        """Add params to kws if they are accepted by func."""
        func_params = signature(func).parameters
        for key, val in params.items():
            if key in func_params:
                kws.setdefault(key, val)

    def plot(self, joint_func, marginal_func, **kwargs):
        """Draw the plot by passing functions for joint and marginal axes.

        This method passes the ``kwargs`` dictionary to both functions. If you
        need more control, call :meth:`JointGrid.plot_joint` and
        :meth:`JointGrid.plot_marginals` directly with specific parameters.

        Parameters
        ----------
        joint_func, marginal_func : callables
            Functions to draw the bivariate and univariate plots. See methods
            referenced above for information about the required characteristics
            of these functions.
        kwargs
            Additional keyword arguments are passed to both functions.

        Returns
        -------
        :class:`JointGrid` instance
            Returns ``self`` for easy method chaining.

        """
        self.plot_marginals(marginal_func, **kwargs)
        self.plot_joint(joint_func, **kwargs)
        return self

    def plot_joint(self, func, **kwargs):
        """Draw a bivariate plot on the joint axes of the grid.

        Parameters
        ----------
        func : plotting callable
            If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
            it must accept ``x`` and ``y`` vectors of data as the first two
            positional arguments, and it must plot on the "current" axes.
            If ``hue`` was defined in the class constructor, the function must
            accept ``hue`` as a parameter.
        kwargs
            Keyword argument are passed to the plotting function.

        Returns
        -------
        :class:`JointGrid` instance
            Returns ``self`` for easy method chaining.

        """
        kwargs = kwargs.copy()
        if str(func.__module__).startswith("seaborn"):
            # Seaborn axes-level functions take an explicit ``ax``
            kwargs["ax"] = self.ax_joint
        else:
            # Other callables are expected to draw on the "current" axes
            plt.sca(self.ax_joint)
        if self.hue is not None:
            kwargs["hue"] = self.hue
            self._inject_kwargs(func, kwargs, self._hue_params)

        if str(func.__module__).startswith("seaborn"):
            func(x=self.x, y=self.y, **kwargs)
        else:
            func(self.x, self.y, **kwargs)

        return self

    def plot_marginals(self, func, **kwargs):
        """Draw univariate plots on each marginal axes.

        Parameters
        ----------
        func : plotting callable
            If a seaborn function, it should accept ``x`` and ``y`` and plot
            when only one of them is defined. Otherwise, it must accept a vector
            of data as the first positional argument and determine its orientation
            using the ``vertical`` parameter, and it must plot on the "current" axes.
            If ``hue`` was defined in the class constructor, it must accept ``hue``
            as a parameter.
        kwargs
            Keyword argument are passed to the plotting function.

        Returns
        -------
        :class:`JointGrid` instance
            Returns ``self`` for easy method chaining.

        """
        seaborn_func = (
            str(func.__module__).startswith("seaborn")
            # deprecated distplot has a legacy API, special case it
            and not func.__name__ == "distplot"
        )
        func_params = signature(func).parameters
        kwargs = kwargs.copy()
        if self.hue is not None:
            kwargs["hue"] = self.hue
            self._inject_kwargs(func, kwargs, self._hue_params)

        if "legend" in func_params:
            kwargs.setdefault("legend", False)

        if "orientation" in func_params:
            # e.g. plt.hist
            orient_kw_x = {"orientation": "vertical"}
            orient_kw_y = {"orientation": "horizontal"}
        elif "vertical" in func_params:
            # e.g. sns.distplot (also how did this get backwards?)
            orient_kw_x = {"vertical": False}
            orient_kw_y = {"vertical": True}
        else:
            # Fix: previously these names were left unbound here, so calling
            # with a non-seaborn function that has neither an ``orientation``
            # nor a ``vertical`` parameter raised NameError below.
            orient_kw_x = {}
            orient_kw_y = {}

        if seaborn_func:
            func(x=self.x, ax=self.ax_marg_x, **kwargs)
        else:
            plt.sca(self.ax_marg_x)
            func(self.x, **orient_kw_x, **kwargs)

        if seaborn_func:
            func(y=self.y, ax=self.ax_marg_y, **kwargs)
        else:
            plt.sca(self.ax_marg_y)
            func(self.y, **orient_kw_y, **kwargs)

        self.ax_marg_x.yaxis.get_label().set_visible(False)
        self.ax_marg_y.xaxis.get_label().set_visible(False)

        return self

    def refline(
        self, *, x=None, y=None, joint=True, marginal=True,
        color='.5', linestyle='--', **line_kws
    ):
        """Add a reference line(s) to joint and/or marginal axes.

        Parameters
        ----------
        x, y : numeric
            Value(s) to draw the line(s) at.
        joint, marginal : bools
            Whether to add the reference line(s) to the joint/marginal axes.
        color : :mod:`matplotlib color <matplotlib.colors>`
            Specifies the color of the reference line(s).
        linestyle : str
            Specifies the style of the reference line(s).
        line_kws : key, value mappings
            Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
            when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
            is not None.

        Returns
        -------
        :class:`JointGrid` instance
            Returns ``self`` for easy method chaining.

        """
        line_kws['color'] = color
        line_kws['linestyle'] = linestyle

        if x is not None:
            if joint:
                self.ax_joint.axvline(x, **line_kws)
            if marginal:
                self.ax_marg_x.axvline(x, **line_kws)

        if y is not None:
            if joint:
                self.ax_joint.axhline(y, **line_kws)
            if marginal:
                self.ax_marg_y.axhline(y, **line_kws)

        return self

    def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
        """Set axis labels on the bivariate axes.

        Parameters
        ----------
        xlabel, ylabel : strings
            Label names for the x and y variables.
        kwargs : key, value mappings
            Other keyword arguments are passed to the following functions:

            - :meth:`matplotlib.axes.Axes.set_xlabel`
            - :meth:`matplotlib.axes.Axes.set_ylabel`

        Returns
        -------
        :class:`JointGrid` instance
            Returns ``self`` for easy method chaining.

        """
        self.ax_joint.set_xlabel(xlabel, **kwargs)
        self.ax_joint.set_ylabel(ylabel, **kwargs)
        return self
JointGrid.__init__.__doc__ = """\
Set up the grid of subplots and store data internally for easy plotting.
Parameters
----------
{params.core.data}
{params.core.xy}
height : number
Size of each side of the figure in inches (it will be square).
ratio : number
Ratio of joint axes height to marginal axes height.
space : number
Space between the joint and marginal axes
dropna : bool
If True, remove missing observations before plotting.
{{x, y}}lim : pairs of numbers
Set axis limits to these values before plotting.
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{params.core.hue}
Note: unlike in :class:`FacetGrid` or :class:`PairGrid`, the axes-level
functions must support ``hue`` to use it in :class:`JointGrid`.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
See Also
--------
{seealso.jointplot}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/JointGrid.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def _check_argument(param, options, value):
"""Raise if value for param is not in options."""
if value not in options:
raise ValueError(
f"`{param}` must be one of {options}, but {repr(value)} was passed."
)
def blend_palette(colors, n_colors=6, as_cmap=False, input="rgb"):
    """Make a palette that blends between a list of colors.

    Parameters
    ----------
    colors : sequence of colors in various formats interpreted by `input`
        hex code, html color name, or tuple in `input` space.
    n_colors : int, optional
        Number of colors in the palette.
    as_cmap : bool, optional
        If True, return a :class:`matplotlib.colors.ListedColormap`.
    input : str, optional
        Color space in which to interpret the input colors; forwarded to the
        internal color converter. Presumably one of "rgb", "hls", "husl", or
        "xkcd" — TODO confirm against `_color_to_rgb`.

    Returns
    -------
    palette
        list of RGB tuples or :class:`matplotlib.colors.ListedColormap`

    Examples
    --------
    .. include:: ../docstrings/blend_palette.rst

    """
    # Fix: the Examples directive previously read ``.. include:`` (one colon),
    # which is not a valid reST directive, so the example was dropped from docs.
    # Normalize every input color to an RGB tuple before interpolating.
    colors = [_color_to_rgb(color, input) for color in colors]
    name = "blend"
    pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)
    if not as_cmap:
        rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3]  # no alpha
        pal = _ColorPalette(map(tuple, rgb_array))
    return pal
def histplot(
    data=None, *,
    # Vector variables
    x=None, y=None, hue=None, weights=None,
    # Histogram computation parameters
    stat="count", bins="auto", binwidth=None, binrange=None,
    discrete=None, cumulative=False, common_bins=True, common_norm=True,
    # Histogram appearance parameters
    multiple="layer", element="bars", fill=True, shrink=1,
    # Histogram smoothing with a kernel density estimate
    kde=False, kde_kws=None, line_kws=None,
    # Bivariate histogram parameters
    thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,
    # Hue mapping parameters
    palette=None, hue_order=None, hue_norm=None, color=None,
    # Axes information
    log_scale=None, legend=True, ax=None,
    # Other appearance keywords
    **kwargs,
):
    # Full public docstring is assigned to ``histplot.__doc__`` below.
    # ``locals()`` must be captured before any new local names are bound so
    # that only the call parameters are interpreted as plot semantics.
    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals())
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    # Fall back to the currently-active Axes when none is supplied
    if ax is None:
        ax = plt.gca()
    # Attaching resolves orientation and applies any log scaling
    p._attach(ax, log_scale=log_scale)
    if p.univariate:  # Note, bivariate plots won't cycle
        if fill:
            method = ax.bar if element == "bars" else ax.fill_between
        else:
            method = ax.plot
        # Resolve the default artist color for the drawing method in use;
        # done here (before the early return) so the property cycle advances
        color = _default_color(method, hue, color, kwargs)
    if not p.has_xy_data:
        return ax
    # Default to discrete bins for categorical variables
    if discrete is None:
        discrete = p._default_discrete()
    # Parameters forwarded to the histogram estimator
    estimate_kws = dict(
        stat=stat,
        bins=bins,
        binwidth=binwidth,
        binrange=binrange,
        discrete=discrete,
        cumulative=cumulative,
    )
    if p.univariate:
        p.plot_univariate_histogram(
            multiple=multiple,
            element=element,
            fill=fill,
            shrink=shrink,
            common_norm=common_norm,
            common_bins=common_bins,
            kde=kde,
            kde_kws=kde_kws,
            color=color,
            legend=legend,
            estimate_kws=estimate_kws,
            line_kws=line_kws,
            **kwargs,
        )
    else:
        p.plot_bivariate_histogram(
            common_bins=common_bins,
            common_norm=common_norm,
            thresh=thresh,
            pthresh=pthresh,
            pmax=pmax,
            color=color,
            legend=legend,
            cbar=cbar,
            cbar_ax=cbar_ax,
            cbar_kws=cbar_kws,
            estimate_kws=estimate_kws,
            **kwargs,
        )
    return ax
# Assemble the public docstring from the shared doc-component templates.
# Fix: the Notes section read "It is always a good to try", now corrected to
# "It is always a good idea to try" (matching the kdeplot notes phrasing).
histplot.__doc__ = """\
Plot univariate or bivariate histograms to show distributions of datasets.
A histogram is a classic visualization tool that represents the distribution
of one or more variables by counting the number of observations that fall within
discrete bins.
This function can normalize the statistic computed within each bin to estimate
frequency, density or probability mass, and it can add a smooth curve obtained
using a kernel density estimate, similar to :func:`kdeplot`.
More information is provided in the :ref:`user guide <tutorial_hist>`.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
    If provided, weight the contribution of the corresponding data points
    towards the count in each bin by these factors.
{params.hist.stat}
{params.hist.bins}
{params.hist.binwidth}
{params.hist.binrange}
discrete : bool
    If True, default to ``binwidth=1`` and draw the bars so that they are
    centered on their corresponding data points. This avoids "gaps" that may
    otherwise appear when using discrete (integer) data.
cumulative : bool
    If True, plot the cumulative counts as bins increase.
common_bins : bool
    If True, use the same bins when semantic variables produce multiple
    plots. If using a reference rule to determine the bins, it will be computed
    with the full dataset.
common_norm : bool
    If True and using a normalized statistic, the normalization will apply over
    the full dataset. Otherwise, normalize each histogram independently.
multiple : {{"layer", "dodge", "stack", "fill"}}
    Approach to resolving multiple elements when semantic mapping creates subsets.
    Only relevant with univariate data.
element : {{"bars", "step", "poly"}}
    Visual representation of the histogram statistic.
    Only relevant with univariate data.
fill : bool
    If True, fill in the space under the histogram.
    Only relevant with univariate data.
shrink : number
    Scale the width of each bar relative to the binwidth by this factor.
    Only relevant with univariate data.
kde : bool
    If True, compute a kernel density estimate to smooth the distribution
    and show on the plot as (one or more) line(s).
    Only relevant with univariate data.
kde_kws : dict
    Parameters that control the KDE computation, as in :func:`kdeplot`.
line_kws : dict
    Parameters that control the KDE visualization, passed to
    :meth:`matplotlib.axes.Axes.plot`.
thresh : number or None
    Cells with a statistic less than or equal to this value will be transparent.
    Only relevant with bivariate data.
pthresh : number or None
    Like ``thresh``, but a value in [0, 1] such that cells with aggregate counts
    (or other statistics, when used) up to this proportion of the total will be
    transparent.
pmax : number or None
    A value in [0, 1] that sets that saturation point for the colormap at a value
    such that cells below constitute this proportion of the total count (or
    other statistic, when used).
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
    Other keyword arguments are passed to one of the following matplotlib
    functions:
    - :meth:`matplotlib.axes.Axes.bar` (univariate, element="bars")
    - :meth:`matplotlib.axes.Axes.fill_between` (univariate, other element, fill=True)
    - :meth:`matplotlib.axes.Axes.plot` (univariate, other element, fill=False)
    - :meth:`matplotlib.axes.Axes.pcolormesh` (bivariate)
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.kdeplot}
{seealso.rugplot}
{seealso.ecdfplot}
{seealso.jointplot}
Notes
-----
The choice of bins for computing and plotting a histogram can exert
substantial influence on the insights that one is able to draw from the
visualization. If the bins are too large, they may erase important features.
On the other hand, bins that are too small may be dominated by random
variability, obscuring the shape of the true underlying distribution. The
default bin size is determined using a reference rule that depends on the
sample size and variance. This works well in many cases, (i.e., with
"well-behaved" data) but it fails in others. It is always a good idea to try
different bin sizes to be sure that you are not missing something important.
This function allows you to specify bins in several different ways, such as
by setting the total number of bins to use, the width of each bin, or the
specific locations where the bins should break.
Examples
--------
.. include:: ../docstrings/histplot.rst
""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def kdeplot(
    data=None, *, x=None, y=None, hue=None, weights=None,
    palette=None, hue_order=None, hue_norm=None, color=None, fill=None,
    multiple="layer", common_norm=True, common_grid=False, cumulative=False,
    bw_method="scott", bw_adjust=1, warn_singular=True, log_scale=None,
    levels=10, thresh=.05, gridsize=200, cut=3, clip=None,
    legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,
    **kwargs,
):
    # --- Start with backwards compatibility for versions < 0.11.0 ----------------

    # Handle (past) deprecation of `data2`
    if "data2" in kwargs:
        msg = "`data2` has been removed (replaced by `y`); please update your code."
        # Bug fix: the exception object was previously constructed but never
        # raised, so a stray `data2` argument was silently ignored.
        raise TypeError(msg)

    # Handle deprecation of `vertical`
    vertical = kwargs.pop("vertical", None)
    if vertical is not None:
        if vertical:
            action_taken = "assigning data to `y`."
            # Swap the orientation of whatever the caller assigned.
            if x is None:
                data, y = y, data
            else:
                x, y = y, x
        else:
            action_taken = "assigning data to `x`."
        msg = textwrap.dedent(f"""\n
        The `vertical` parameter is deprecated; {action_taken}
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Handle deprecation of `bw`
    bw = kwargs.pop("bw", None)
    if bw is not None:
        msg = textwrap.dedent(f"""\n
        The `bw` parameter is deprecated in favor of `bw_method` and `bw_adjust`.
        Setting `bw_method={bw}`, but please see the docs for the new parameters
        and update your code. This will become an error in seaborn v0.13.0.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)
        bw_method = bw

    # Handle deprecation of `kernel`
    if kwargs.pop("kernel", None) is not None:
        msg = textwrap.dedent("""\n
        Support for alternate kernels has been removed; using Gaussian kernel.
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Handle deprecation of shade_lowest
    shade_lowest = kwargs.pop("shade_lowest", None)
    if shade_lowest is not None:
        if shade_lowest:
            thresh = 0
        # Bug fix: the message was missing the closing backtick after
        # `thresh={thresh}`.
        msg = textwrap.dedent(f"""\n
        `shade_lowest` has been replaced by `thresh`; setting `thresh={thresh}`.
        This will become an error in seaborn v0.13.0; please update your code.
        """)
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Handle "soft" deprecation of shade `shade` is not really the right
    # terminology here, but unlike some of the other deprecated parameters it
    # is probably very commonly used and much hard to remove. This is therefore
    # going to be a longer process where, first, `fill` will be introduced and
    # be used throughout the documentation. In 0.12, when kwarg-only
    # enforcement hits, we can remove the shade/shade_lowest out of the
    # function signature all together and pull them out of the kwargs. Then we
    # can actually fire a FutureWarning, and eventually remove.
    shade = kwargs.pop("shade", None)
    if shade is not None:
        fill = shade
        msg = textwrap.dedent(f"""\n
        `shade` is now deprecated in favor of `fill`; setting `fill={shade}`.
        This will become an error in seaborn v0.14.0; please update your code.
        """)
        warnings.warn(msg, FutureWarning, stacklevel=2)

    # Handle `n_levels`
    # This was never in the formal API but it was processed, and appeared in an
    # example. We can treat as an alias for `levels` now and deprecate later.
    levels = kwargs.pop("n_levels", levels)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #

    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals()),
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)

    if ax is None:
        ax = plt.gca()

    p._attach(ax, allowed_types=["numeric", "datetime"], log_scale=log_scale)

    # Pick the default color using the artist that will actually draw.
    method = ax.fill_between if fill else ax.plot
    color = _default_color(method, hue, color, kwargs)

    if not p.has_xy_data:
        return ax

    # Pack the kwargs for statistics.KDE
    estimate_kws = dict(
        bw_method=bw_method,
        bw_adjust=bw_adjust,
        gridsize=gridsize,
        cut=cut,
        clip=clip,
        cumulative=cumulative,
    )

    if p.univariate:
        plot_kws = kwargs.copy()
        p.plot_univariate_density(
            multiple=multiple,
            common_norm=common_norm,
            common_grid=common_grid,
            fill=fill,
            color=color,
            legend=legend,
            warn_singular=warn_singular,
            estimate_kws=estimate_kws,
            **plot_kws,
        )
    else:
        p.plot_bivariate_density(
            common_norm=common_norm,
            fill=fill,
            levels=levels,
            thresh=thresh,
            legend=legend,
            color=color,
            warn_singular=warn_singular,
            cbar=cbar,
            cbar_ax=cbar_ax,
            cbar_kws=cbar_kws,
            estimate_kws=estimate_kws,
            **kwargs,
        )

    return ax
# Build the public docstring from the shared parameter/return templates.
# Fix: the bivariate-fill entry referenced `matplotlib.axes.contourf`, which is
# not a resolvable target; it must be qualified as `matplotlib.axes.Axes.contourf`.
kdeplot.__doc__ = """\
Plot univariate or bivariate distributions using kernel density estimation.
A kernel density estimate (KDE) plot is a method for visualizing the
distribution of observations in a dataset, analogous to a histogram. KDE
represents the data using a continuous probability density curve in one or
more dimensions.
The approach is explained further in the :ref:`user guide <tutorial_kde>`.
Relative to a histogram, KDE can produce a plot that is less cluttered and
more interpretable, especially when drawing multiple distributions. But it
has the potential to introduce distortions if the underlying distribution is
bounded or not smooth. Like a histogram, the quality of the representation
also depends on the selection of good smoothing parameters.
Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
    If provided, weight the kernel density estimation using these values.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
fill : bool or None
    If True, fill in the area under univariate density curves or between
    bivariate contours. If None, the default depends on ``multiple``.
{params.dist.multiple}
common_norm : bool
    If True, scale each conditional density by the number of observations
    such that the total area under all densities sums to 1. Otherwise,
    normalize each density independently.
common_grid : bool
    If True, use the same evaluation grid for each kernel density estimate.
    Only relevant with univariate data.
{params.kde.cumulative}
{params.kde.bw_method}
{params.kde.bw_adjust}
warn_singular : bool
    If True, issue a warning when trying to estimate the density of data
    with zero variance.
{params.dist.log_scale}
levels : int or vector
    Number of contour levels or values to draw contours at. A vector argument
    must have increasing values in [0, 1]. Levels correspond to iso-proportions
    of the density: e.g., 20% of the probability mass will lie below the
    contour drawn for 0.2. Only relevant with bivariate data.
thresh : number in [0, 1]
    Lowest iso-proportion level at which to draw a contour line. Ignored when
    ``levels`` is a vector. Only relevant with bivariate data.
gridsize : int
    Number of points on each dimension of the evaluation grid.
{params.kde.cut}
{params.kde.clip}
{params.dist.legend}
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.ax}
kwargs
    Other keyword arguments are passed to one of the following matplotlib
    functions:
    - :meth:`matplotlib.axes.Axes.plot` (univariate, ``fill=False``),
    - :meth:`matplotlib.axes.Axes.fill_between` (univariate, ``fill=True``),
    - :meth:`matplotlib.axes.Axes.contour` (bivariate, ``fill=False``),
    - :meth:`matplotlib.axes.Axes.contourf` (bivariate, ``fill=True``).
Returns
-------
{returns.ax}
See Also
--------
{seealso.displot}
{seealso.histplot}
{seealso.ecdfplot}
{seealso.jointplot}
{seealso.violinplot}
Notes
-----
The *bandwidth*, or standard deviation of the smoothing kernel, is an
important parameter. Misspecification of the bandwidth can produce a
distorted representation of the data. Much like the choice of bin width in a
histogram, an over-smoothed curve can erase true features of a
distribution, while an under-smoothed curve can create false features out of
random variability. The rule-of-thumb that sets the default bandwidth works
best when the true distribution is smooth, unimodal, and roughly bell-shaped.
It is always a good idea to check the default behavior by using ``bw_adjust``
to increase or decrease the amount of smoothing.
Because the smoothing algorithm uses a Gaussian kernel, the estimated density
curve can extend to values that do not make sense for a particular dataset.
For example, the curve may be drawn over negative values when smoothing data
that are naturally positive. The ``cut`` and ``clip`` parameters can be used
to control the extent of the curve, but datasets that have many observations
close to a natural boundary may be better served by a different visualization
method.
Similar considerations apply when a dataset is naturally discrete or "spiky"
(containing many repeated observations of the same value). Kernel density
estimation will always produce a smooth curve, which would be misleading
in these situations.
The units on the density axis are a common source of confusion. While kernel
density estimation produces a probability distribution, the height of the curve
at each point gives a density, not a probability. A probability can be obtained
only by integrating the density across a range. The curve is normalized so
that the integral over all possible values is 1, meaning that the scale of
the density axis depends on the data values.
Examples
--------
.. include:: ../docstrings/kdeplot.rst
""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From https://stats.stackexchange.com/questions/798/
a = np.asarray(a)
if len(a) < 2:
return 1
iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))
h = 2 * iqr / (len(a) ** (1 / 3))
# fall back to sqrt(a) bins if iqr is 0
if h == 0:
return int(np.sqrt(a.size))
else:
return int(np.ceil((a.max() - a.min()) / h))
def scatterplot(
    data=None, *,
    x=None, y=None, hue=None, size=None, style=None,
    palette=None, hue_order=None, hue_norm=None,
    sizes=None, size_order=None, size_norm=None,
    markers=True, style_order=None, legend="auto", ax=None,
    **kwargs
):
    # Resolve the semantic variable assignments before any plotting state.
    semantics = _ScatterPlotter.get_semantics(locals())
    plotter = _ScatterPlotter(data=data, variables=semantics, legend=legend)

    plotter.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    plotter.map_size(sizes=sizes, order=size_order, norm=size_norm)
    plotter.map_style(markers=markers, order=style_order)

    if ax is None:
        ax = plt.gca()

    # Nothing assigned to x or y: return the (possibly new) Axes untouched.
    if not plotter.has_xy_data:
        return ax

    plotter._attach(ax)

    # Other functions have color as an explicit param,
    # and we should probably do that here too
    color = kwargs.pop("color", None)
    kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)

    plotter.plot(ax, kwargs)
    return ax
# Compose the public docstring from the shared relational-narrative and
# parameter/return templates so wording stays consistent across functions.
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in `data`
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in `data`
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def regplot(
    data=None, *, x=None, y=None,
    x_estimator=None, x_bins=None, x_ci="ci",
    scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
    seed=None, order=1, logistic=False, lowess=False, robust=False,
    logx=False, x_partial=None, y_partial=None,
    truncate=True, dropna=True, x_jitter=None, y_jitter=None,
    label=None, color=None, marker="o",
    scatter_kws=None, line_kws=None, ax=None
):
    # NOTE: _RegressionPlotter is called positionally; the argument order
    # below must match its signature exactly.
    plotter = _RegressionPlotter(
        x, y, data, x_estimator, x_bins, x_ci,
        scatter, fit_reg, ci, n_boot, units, seed,
        order, logistic, lowess, robust, logx,
        x_partial, y_partial, truncate, dropna,
        x_jitter, y_jitter, color, label,
    )

    if ax is None:
        ax = plt.gca()

    # Shallow-copy the caller's dicts so we never mutate their arguments.
    scatter_kws = {} if scatter_kws is None else copy.copy(scatter_kws)
    scatter_kws["marker"] = marker
    line_kws = {} if line_kws is None else copy.copy(line_kws)

    plotter.plot(ax, scatter_kws, line_kws)
    return ax
# Compose the public docstring from the shared regression templates.
# Fix: the Examples section used `.. include:` (single colon), which reST
# treats as plain text; the directive requires `.. include::`.
regplot.__doc__ = dedent("""\
    Plot data and a linear regression model fit.
    {model_api}
    Parameters
    ----------
    x, y: string, series, or vector array
        Input variables. If strings, these should correspond with column names
        in ``data``. When pandas objects are used, axes will be labeled with
        the series name.
    {data}
    {x_estimator}
    {x_bins}
    {x_ci}
    {scatter}
    {fit_reg}
    {ci}
    {n_boot}
    {units}
    {seed}
    {order}
    {logistic}
    {lowess}
    {robust}
    {logx}
    {xy_partial}
    {truncate}
    {xy_jitter}
    label : string
        Label to apply to either the scatterplot or regression line (if
        ``scatter`` is ``False``) for use in a legend.
    color : matplotlib color
        Color to apply to all plot elements; will be superseded by colors
        passed in ``scatter_kws`` or ``line_kws``.
    marker : matplotlib marker code
        Marker to use for the scatterplot glyphs.
    {scatter_line_kws}
    ax : matplotlib Axes, optional
        Axes object to draw the plot onto, otherwise uses the current Axes.
    Returns
    -------
    ax : matplotlib Axes
        The Axes object containing the plot.
    See Also
    --------
    lmplot : Combine :func:`regplot` and :class:`FacetGrid` to plot multiple
             linear relationships in a dataset.
    jointplot : Combine :func:`regplot` and :class:`JointGrid` (when used with
                ``kind="reg"``).
    pairplot : Combine :func:`regplot` and :class:`PairGrid` (when used with
               ``kind="reg"``).
    residplot : Plot the residuals of a linear regression model.
    Notes
    -----
    {regplot_vs_lmplot}
    It's also easy to combine :func:`regplot` and :class:`JointGrid` or
    :class:`PairGrid` through the :func:`jointplot` and :func:`pairplot`
    functions, although these do not directly accept all of :func:`regplot`'s
    parameters.
    Examples
    --------
    .. include:: ../docstrings/regplot.rst
    """).format(**_regression_docs)
def residplot(
    data=None, *, x=None, y=None,
    x_partial=None, y_partial=None, lowess=False,
    order=1, robust=False, dropna=True, label=None, color=None,
    scatter_kws=None, line_kws=None, ax=None
):
    """Plot the residuals of a linear regression.
    This function will regress y on x (possibly as a robust or polynomial
    regression) and then draw a scatterplot of the residuals. You can
    optionally fit a lowess smoother to the residual plot, which can
    help in determining if there is structure to the residuals.
    Parameters
    ----------
    data : DataFrame, optional
        DataFrame to use if `x` and `y` are column names.
    x : vector or string
        Data or column name in `data` for the predictor variable.
    y : vector or string
        Data or column name in `data` for the response variable.
    {x, y}_partial : vectors or string(s) , optional
        These variables are treated as confounding and are removed from
        the `x` or `y` variables before plotting.
    lowess : boolean, optional
        Fit a lowess smoother to the residual scatterplot.
    order : int, optional
        Order of the polynomial to fit when calculating the residuals.
    robust : boolean, optional
        Fit a robust linear regression when calculating the residuals.
    dropna : boolean, optional
        If True, ignore observations with missing data when fitting and
        plotting.
    label : string, optional
        Label that will be used in any plot legends.
    color : matplotlib color, optional
        Color to use for all elements of the plot.
    {scatter, line}_kws : dictionaries, optional
        Additional keyword arguments passed to scatter() and plot() for drawing
        the components of the plot.
    ax : matplotlib axis, optional
        Plot into this axis, otherwise grab the current axis or make a new
        one if not existing.
    Returns
    -------
    ax: matplotlib axes
        Axes with the regression plot.
    See Also
    --------
    regplot : Plot a simple linear regression model.
    jointplot : Draw a :func:`residplot` with univariate marginal distributions
                (when used with ``kind="resid"``).
    Examples
    --------
    .. include:: ../docstrings/residplot.rst
    """
    plotter = _RegressionPlotter(
        x, y, data, ci=None,
        order=order, robust=robust,
        x_partial=x_partial, y_partial=y_partial,
        dropna=dropna, color=color, label=label,
    )

    if ax is None:
        ax = plt.gca()

    # Fit the regression and replace y with the residuals of that fit.
    _, yhat, _ = plotter.fit_regression(grid=plotter.x)
    plotter.y = plotter.y - yhat

    # Either smooth the residuals with lowess or draw no fit line at all.
    if not lowess:
        plotter.fit_reg = False
    else:
        plotter.lowess = True

    # Reference line at zero residual.
    ax.axhline(0, ls=":", c=".2")

    # Draw the scatterplot, copying the kwarg dicts to avoid mutation.
    scatter_kws = {} if scatter_kws is None else scatter_kws.copy()
    line_kws = {} if line_kws is None else line_kws.copy()
    plotter.plot(ax, scatter_kws, line_kws)
    return ax
def jointplot(
    data=None, *, x=None, y=None, hue=None, kind="scatter",
    height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,
    color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,
    joint_kws=None, marginal_kws=None,
    **kwargs
):
    """Draw a plot of two variables with bivariate and univariate graphs."""
    # Avoid circular imports
    from .relational import scatterplot
    from .regression import regplot, residplot
    from .distributions import histplot, kdeplot, _freedman_diaconis_bins

    if kwargs.pop("ax", None) is not None:
        msg = "Ignoring `ax`; jointplot is a figure-level function."
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Set up empty default kwarg dicts
    joint_kws = {} if joint_kws is None else joint_kws.copy()
    joint_kws.update(kwargs)
    marginal_kws = {} if marginal_kws is None else marginal_kws.copy()

    # Handle deprecations of distplot-specific kwargs
    # Bug fix: a missing comma made "norm_hist" "hist_kws" concatenate into
    # the single (nonexistent) key "norm_histhist_kws", so a `norm_hist`
    # entry was never detected or stripped from `marginal_kws`.
    distplot_keys = [
        "rug", "fit", "hist_kws", "norm_hist", "rug_kws",
    ]
    unused_keys = []
    for key in distplot_keys:
        if key in marginal_kws:
            unused_keys.append(key)
            marginal_kws.pop(key)
    if unused_keys and kind != "kde":
        msg = (
            "The marginal plotting function has changed to `histplot`,"
            " which does not accept the following argument(s): {}."
        ).format(", ".join(unused_keys))
        # stacklevel=2 for consistency with the other warnings in this module
        warnings.warn(msg, UserWarning, stacklevel=2)

    # Validate the plot kind
    plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
    _check_argument("kind", plot_kinds, kind)

    # Raise early if using `hue` with a kind that does not support it
    if hue is not None and kind in ["hex", "reg", "resid"]:
        msg = (
            f"Use of `hue` with `kind='{kind}'` is not currently supported."
        )
        raise ValueError(msg)

    # Make a colormap based off the plot color
    # (Currently used only for kind="hex")
    if color is None:
        color = "C0"
    color_rgb = mpl.colors.colorConverter.to_rgb(color)
    colors = [utils.set_hls_values(color_rgb, l=l)  # noqa
              for l in np.linspace(1, 0, 12)]
    cmap = blend_palette(colors, as_cmap=True)

    # Matplotlib's hexbin plot is not na-robust
    if kind == "hex":
        dropna = True

    # Initialize the JointGrid object
    grid = JointGrid(
        data=data, x=x, y=y, hue=hue,
        palette=palette, hue_order=hue_order, hue_norm=hue_norm,
        dropna=dropna, height=height, ratio=ratio, space=space,
        xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
    )

    if grid.hue is not None:
        marginal_kws.setdefault("legend", False)

    # Plot the data using the grid
    if kind.startswith("scatter"):

        joint_kws.setdefault("color", color)
        grid.plot_joint(scatterplot, **joint_kws)

        if grid.hue is None:
            marg_func = histplot
        else:
            marg_func = kdeplot
            marginal_kws.setdefault("warn_singular", False)
            marginal_kws.setdefault("fill", True)

        marginal_kws.setdefault("color", color)
        grid.plot_marginals(marg_func, **marginal_kws)

    elif kind.startswith("hist"):

        # TODO process pair parameters for bins, etc. and pass
        # to both joint and marginal plots
        joint_kws.setdefault("color", color)
        grid.plot_joint(histplot, **joint_kws)

        marginal_kws.setdefault("kde", False)
        marginal_kws.setdefault("color", color)

        marg_x_kws = marginal_kws.copy()
        marg_y_kws = marginal_kws.copy()

        # Tuple-valued bin parameters are split between the two marginals.
        pair_keys = "bins", "binwidth", "binrange"
        for key in pair_keys:
            if isinstance(joint_kws.get(key), tuple):
                x_val, y_val = joint_kws[key]
                marg_x_kws.setdefault(key, x_val)
                marg_y_kws.setdefault(key, y_val)

        histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
        histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)

    elif kind.startswith("kde"):

        joint_kws.setdefault("color", color)
        joint_kws.setdefault("warn_singular", False)
        grid.plot_joint(kdeplot, **joint_kws)

        marginal_kws.setdefault("color", color)
        if "fill" in joint_kws:
            marginal_kws.setdefault("fill", joint_kws["fill"])

        grid.plot_marginals(kdeplot, **marginal_kws)

    elif kind.startswith("hex"):

        # Choose the hexbin gridsize from the Freedman-Diaconis rule,
        # capped at 50 bins in each direction.
        x_bins = min(_freedman_diaconis_bins(grid.x), 50)
        y_bins = min(_freedman_diaconis_bins(grid.y), 50)
        gridsize = int(np.mean([x_bins, y_bins]))

        joint_kws.setdefault("gridsize", gridsize)
        joint_kws.setdefault("cmap", cmap)
        grid.plot_joint(plt.hexbin, **joint_kws)

        marginal_kws.setdefault("kde", False)
        marginal_kws.setdefault("color", color)
        grid.plot_marginals(histplot, **marginal_kws)

    elif kind.startswith("reg"):

        marginal_kws.setdefault("color", color)
        marginal_kws.setdefault("kde", True)
        grid.plot_marginals(histplot, **marginal_kws)

        joint_kws.setdefault("color", color)
        grid.plot_joint(regplot, **joint_kws)

    elif kind.startswith("resid"):

        joint_kws.setdefault("color", color)
        grid.plot_joint(residplot, **joint_kws)

        # Recover the plotted residuals from the joint axes to build marginals.
        x, y = grid.ax_joint.collections[0].get_offsets().T
        marginal_kws.setdefault("color", color)
        histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
        histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)

    # Make the main axes active in the matplotlib state machine
    plt.sca(grid.ax_joint)

    return grid
171,863 | OPT_HIDE = "hide"
OPT_STOP_EXCEPTIONS = "stopatexceptions"
import win32api
import win32ui
def DoGetOption(optsDict, optName, default):
    # Look the option up in the "Debugger Options" registry profile section,
    # falling back to `default`, and record it in the supplied dict.
    value = win32ui.GetProfileVal("Debugger Options", optName, default)
    optsDict[optName] = value
def LoadDebuggerOptions():
    # Build the debugger options dict from the registry profile, using the
    # defaults listed here when a value has not been stored.
    options = {}
    for name, default in ((OPT_HIDE, 0), (OPT_STOP_EXCEPTIONS, 1)):
        DoGetOption(options, name, default)
    return options
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.