id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
170,910 | from re import finditer
from xml.sax.saxutils import escape, unescape
The provided code snippet includes necessary dependencies for implementing the `string_span_tokenize` function. Write a Python function `def string_span_tokenize(s, sep)` to solve the following problem:
r""" Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` tuples, by splitting the string at each occurrence of *sep*. >>> from nltk.tokenize.util import string_span_tokenize >>> s = '''Good muffins cost $3.88\nin New York. Please buy me ... two of them.\n\nThanks.''' >>> list(string_span_tokenize(s, " ")) # doctest: +NORMALIZE_WHITESPACE [(0, 4), (5, 12), (13, 17), (18, 26), (27, 30), (31, 36), (37, 37), (38, 44), (45, 48), (49, 55), (56, 58), (59, 73)] :param s: the string to be tokenized :type s: str :param sep: the token separator :type sep: str :rtype: iter(tuple(int, int))
Here is the function:
def string_span_tokenize(s, sep):
    r"""
    Yield ``(start, end)`` offset pairs for the tokens obtained by
    splitting *s* at every occurrence of the literal separator *sep*.

    A separator at the very start of the string produces no leading
    token, but adjacent separators elsewhere do yield empty spans
    (e.g. ``(37, 37)`` in the classic muffins example).

    :param s: the string to be tokenized
    :type s: str
    :param sep: the token separator (matched literally, not as a regex)
    :type sep: str
    :raises ValueError: if *sep* is the empty string
    :rtype: iter(tuple(int, int))
    """
    if not sep:
        raise ValueError("Token delimiter must not be empty")
    start = 0
    while True:
        try:
            stop = s.index(sep, start)
        except ValueError:
            # No further separators: emit the trailing token, if any.
            if start != len(s):
                yield start, len(s)
            return
        # Historical behaviour: a separator at offset 0 yields nothing.
        if stop != 0:
            yield start, stop
        start = stop + len(sep)
170,911 | from re import finditer
from xml.sax.saxutils import escape, unescape
def finditer(pattern: AnyStr, string: AnyStr, flags: _FlagsType = ...) -> Iterator[Match[AnyStr]]: ...
def finditer(pattern: Pattern[AnyStr], string: AnyStr, flags: _FlagsType = ...) -> Iterator[Match[AnyStr]]: ...
The provided code snippet includes necessary dependencies for implementing the `regexp_span_tokenize` function. Write a Python function `def regexp_span_tokenize(s, regexp)` to solve the following problem:
r""" Return the offsets of the tokens in *s*, as a sequence of ``(start, end)`` tuples, by splitting the string at each successive match of *regexp*. >>> from nltk.tokenize.util import regexp_span_tokenize >>> s = '''Good muffins cost $3.88\nin New York. Please buy me ... two of them.\n\nThanks.''' >>> list(regexp_span_tokenize(s, r'\s')) # doctest: +NORMALIZE_WHITESPACE [(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44), (45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)] :param s: the string to be tokenized :type s: str :param regexp: regular expression that matches token separators (must not be empty) :type regexp: str :rtype: iter(tuple(int, int))
Here is the function:
def regexp_span_tokenize(s, regexp):
    r"""
    Yield ``(start, end)`` offset pairs for the tokens in *s*, obtained
    by splitting the string at each successive match of *regexp*.

    Empty spans between adjacent separator matches are suppressed, but
    the final trailing span is always yielded, even when it is empty.

    :param s: the string to be tokenized
    :type s: str
    :param regexp: regular expression that matches token separators
        (must not be empty)
    :type regexp: str
    :rtype: iter(tuple(int, int))
    """
    left = 0
    for m in finditer(regexp, s):
        # Renamed the second span bound from ``next`` to ``after`` so it
        # no longer shadows the ``next`` builtin.
        right, after = m.span()
        if right != left:
            yield left, right
        left = after
    # Text after the last separator (or the whole string when nothing
    # matched) is always emitted.
    yield left, len(s)
170,912 | from re import finditer
from xml.sax.saxutils import escape, unescape
The provided code snippet includes necessary dependencies for implementing the `spans_to_relative` function. Write a Python function `def spans_to_relative(spans)` to solve the following problem:
r""" Return a sequence of relative spans, given a sequence of spans. >>> from nltk.tokenize import WhitespaceTokenizer >>> from nltk.tokenize.util import spans_to_relative >>> s = '''Good muffins cost $3.88\nin New York. Please buy me ... two of them.\n\nThanks.''' >>> list(spans_to_relative(WhitespaceTokenizer().span_tokenize(s))) # doctest: +NORMALIZE_WHITESPACE [(0, 4), (1, 7), (1, 4), (1, 5), (1, 2), (1, 3), (1, 5), (2, 6), (1, 3), (1, 2), (1, 3), (1, 2), (1, 5), (2, 7)] :param spans: a sequence of (start, end) offsets of the tokens :type spans: iter(tuple(int, int)) :rtype: iter(tuple(int, int))
Here is the function:
def spans_to_relative(spans):
    r"""
    Convert absolute ``(start, end)`` spans into relative
    ``(offset, length)`` pairs, where *offset* is measured from the end
    of the previous span (from position 0 for the first span).

    :param spans: a sequence of (start, end) offsets of the tokens
    :type spans: iter(tuple(int, int))
    :rtype: iter(tuple(int, int))
    """
    cursor = 0
    for start, end in spans:
        yield start - cursor, end - start
        cursor = end
170,913 | from re import finditer
from xml.sax.saxutils import escape, unescape
The provided code snippet includes necessary dependencies for implementing the `is_cjk` function. Write a Python function `def is_cjk(character)` to solve the following problem:
Python port of Moses' code to check for CJK character. >>> CJKChars().ranges [(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215), (63744, 64255), (65072, 65103), (65381, 65500), (131072, 196607)] >>> is_cjk(u'\u33fe') True >>> is_cjk(u'\uFE5F') False :param character: The character that needs to be checked. :type character: char :return: bool
Here is the function:
def is_cjk(character):
    """
    Python port of Moses' code to check for CJK character.

    >>> is_cjk(u'\u33fe')
    True
    >>> is_cjk(u'\uFE5F')
    False

    :param character: The character that needs to be checked.
    :type character: char
    :return: bool
    """
    # Compute the code point once and test the ranges lazily with a
    # generator; the original built a full list of booleans for ``any``
    # and called ord() once per range.
    code_point = ord(character)
    return any(
        start <= code_point <= end
        for start, end in (
            (4352, 4607),
            (11904, 42191),
            (43072, 43135),
            (44032, 55215),
            (63744, 64255),
            (65072, 65103),
            (65381, 65500),
            (131072, 196607),
        )
    )
170,914 | from re import finditer
from xml.sax.saxutils import escape, unescape
The provided code snippet includes necessary dependencies for implementing the `xml_escape` function. Write a Python function `def xml_escape(text)` to solve the following problem:
This function transforms the input text into an "escaped" version suitable for well-formed XML formatting. Note that the default xml.sax.saxutils.escape() function don't escape some characters that Moses does so we have to manually add them to the entities dictionary. >>> input_str = ''')| & < > ' " ] [''' >>> expected_output = ''')| & < > ' " ] [''' >>> escape(input_str) == expected_output True >>> xml_escape(input_str) ')| & < > ' " ] [' :param text: The text that needs to be escaped. :type text: str :rtype: str
Here is the function:
def xml_escape(text):
    """
    Transform the input text into an "escaped" version suitable for
    well-formed XML formatting.

    Note that the default ``xml.sax.saxutils.escape()`` function doesn't
    escape some characters that Moses does, so they are added manually
    via the *entities* dictionary.

    >>> xml_escape("']['")
    '&apos;&#93;&#91;&apos;'

    :param text: The text that needs to be escaped.
    :type text: str
    :rtype: str
    """
    # Bug fix: the entity values had been mangled into identity mappings
    # (e.g. "'" -> "'"), which made this function a plain escape().  The
    # Moses-compatible XML entities are restored here.
    return escape(
        text,
        entities={
            r"'": r"&apos;",
            r'"': r"&quot;",
            r"|": r"&#124;",
            r"[": r"&#91;",
            r"]": r"&#93;",
        },
    )
170,915 | from re import finditer
from xml.sax.saxutils import escape, unescape
The provided code snippet includes necessary dependencies for implementing the `xml_unescape` function. Write a Python function `def xml_unescape(text)` to solve the following problem:
This function transforms the "escaped" version suitable for well-formed XML formatting into humanly-readable string. Note that the default xml.sax.saxutils.unescape() function don't unescape some characters that Moses does so we have to manually add them to the entities dictionary. >>> from xml.sax.saxutils import unescape >>> s = ')| & < > ' " ] [' >>> expected = ''')| & < > \' " ] [''' >>> xml_unescape(s) == expected True :param text: The text that needs to be unescaped. :type text: str :rtype: str
Here is the function:
def xml_unescape(text):
    """
    Transform the "escaped" version suitable for well-formed XML
    formatting back into a human-readable string.

    Note that the default ``xml.sax.saxutils.unescape()`` function
    doesn't unescape some entities that Moses produces, so they are
    added manually via the *entities* dictionary.

    >>> xml_unescape("&apos;&#93;&#91;&apos;")
    "']['"

    :param text: The text that needs to be unescaped.
    :type text: str
    :rtype: str
    """
    # Bug fix: the entity keys/values had been mangled into identity
    # mappings, which made this function a plain unescape().  The
    # Moses-compatible entity-to-character mapping is restored here.
    return unescape(
        text,
        entities={
            r"&apos;": r"'",
            r"&quot;": r'"',
            r"&#124;": r"|",
            r"&#91;": r"[",
            r"&#93;": r"]",
        },
    )
170,916 | from re import finditer
from xml.sax.saxutils import escape, unescape
The provided code snippet includes necessary dependencies for implementing the `align_tokens` function. Write a Python function `def align_tokens(tokens, sentence)` to solve the following problem:
This module attempt to find the offsets of the tokens in *s*, as a sequence of ``(start, end)`` tuples, given the tokens and also the source string. >>> from nltk.tokenize import TreebankWordTokenizer >>> from nltk.tokenize.util import align_tokens >>> s = str("The plane, bound for St Petersburg, crashed in Egypt's " ... "Sinai desert just 23 minutes after take-off from Sharm el-Sheikh " ... "on Saturday.") >>> tokens = TreebankWordTokenizer().tokenize(s) >>> expected = [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), ... (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), ... (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), ... (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), ... (123, 131), (131, 132)] >>> output = list(align_tokens(tokens, s)) >>> len(tokens) == len(expected) == len(output) # Check that length of tokens and tuples are the same. True >>> expected == list(align_tokens(tokens, s)) # Check that the output is as expected. True >>> tokens == [s[start:end] for start, end in output] # Check that the slices of the string corresponds to the tokens. True :param tokens: The list of strings that are the result of tokenization :type tokens: list(str) :param sentence: The original string :type sentence: str :rtype: list(tuple(int,int))
Here is the function:
def align_tokens(tokens, sentence):
    """
    Find the offsets of *tokens* within *sentence*, returned as a list
    of ``(start, end)`` tuples, by scanning the sentence left to right
    for each token in order.

    :param tokens: The list of strings that are the result of tokenization
    :type tokens: list(str)
    :param sentence: The original string
    :type sentence: str
    :raises ValueError: if a token cannot be found at or after the end
        of the previously matched token
    :rtype: list(tuple(int,int))
    """
    offsets = []
    cursor = 0
    for token in tokens:
        try:
            begin = sentence.index(token, cursor)
        except ValueError as e:
            raise ValueError(f'substring "{token}" not found in "{sentence}"') from e
        cursor = begin + len(token)
        offsets.append((begin, cursor))
    return offsets
170,917 | from nltk.tokenize.api import StringTokenizer, TokenizerI
from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize
class LineTokenizer(TokenizerI):
    r"""Tokenize a string into its lines, optionally discarding blank lines.
    This is similar to ``s.split('\n')``.

    >>> from nltk.tokenize import LineTokenizer
    >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
    >>> LineTokenizer(blanklines='keep').tokenize(s) # doctest: +NORMALIZE_WHITESPACE
    ['Good muffins cost $3.88', 'in New York.  Please buy me',
    'two of them.', '', 'Thanks.']
    >>> # same as [l for l in s.split('\n') if l.strip()]:
    >>> LineTokenizer(blanklines='discard').tokenize(s) # doctest: +NORMALIZE_WHITESPACE
    ['Good muffins cost $3.88', 'in New York.  Please buy me',
    'two of them.', 'Thanks.']

    :param blanklines: Indicates how blank lines should be handled.  Valid values are:

        - ``discard``: strip blank lines out of the token list before returning it.
          A line is considered blank if it contains only whitespace characters.
        - ``keep``: leave all blank lines in the token list.
        - ``discard-eof``: if the string ends with a newline, then do not generate
          a corresponding token ``''`` after that newline.
    """

    def __init__(self, blanklines="discard"):
        valid_blanklines = ("discard", "keep", "discard-eof")
        if blanklines not in valid_blanklines:
            raise ValueError(
                "Blank lines must be one of: %s" % " ".join(valid_blanklines)
            )
        self._blanklines = blanklines

    def tokenize(self, s):
        lines = s.splitlines()
        # If requested, strip off blank lines.
        if self._blanklines == "discard":
            lines = [line for line in lines if line.rstrip()]
        elif self._blanklines == "discard-eof":
            # Only the trailing blank line (from a final newline) is dropped.
            if lines and not lines[-1].strip():
                lines.pop()
        return lines

    def span_tokenize(self, s):
        if self._blanklines == "keep":
            # Bug fix: string_span_tokenize() matches its separator
            # literally, so the separator must be an actual newline.
            # The previous raw string r"\n" searched for the literal
            # two-character sequence backslash-n, which never occurs in
            # ordinary text, so no splitting ever happened.
            yield from string_span_tokenize(s, "\n")
        else:
            yield from regexp_span_tokenize(s, r"\n(\s+\n)*")
def line_tokenize(text, blanklines="discard"):
    """Tokenize *text* into lines with a :class:`LineTokenizer`.

    :param text: the string to be tokenized
    :type text: str
    :param blanklines: how blank lines are handled; one of ``'discard'``,
        ``'keep'`` or ``'discard-eof'``
    :rtype: list(str)
    """
    tokenizer = LineTokenizer(blanklines)
    return tokenizer.tokenize(text)
170,918 | import math
import re
import string
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
The provided code snippet includes necessary dependencies for implementing the `_pair_iter` function. Write a Python function `def _pair_iter(iterator)` to solve the following problem:
Yields pairs of tokens from the given iterator such that each input token will appear as the first element in a yielded tuple. The last pair will have None as its second element.
Here is the function:
def _pair_iter(iterator):
"""
Yields pairs of tokens from the given iterator such that each input
token will appear as the first element in a yielded tuple. The last
pair will have None as its second element.
"""
iterator = iter(iterator)
try:
prev = next(iterator)
except StopIteration:
return
for el in iterator:
yield (prev, el)
prev = el
yield (prev, None) | Yields pairs of tokens from the given iterator such that each input token will appear as the first element in a yielded tuple. The last pair will have None as its second element. |
170,919 | import math
import re
import string
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
DEBUG_DECISION_FMT = """Text: {text!r} (at offset {period_index})
Sentence break? {break_decision} ({reason})
Collocation? {collocation}
{type1!r}:
known abbreviation: {type1_in_abbrs}
is initial: {type1_is_initial}
{type2!r}:
known sentence starter: {type2_is_sent_starter}
orthographic heuristic suggests is a sentence starter? {type2_ortho_heuristic}
orthographic contexts in training: {type2_ortho_contexts}
"""


def format_debug_decision(d):
    """Render a Punkt sentence-break decision record *d* — a mapping
    containing the keys referenced by ``DEBUG_DECISION_FMT`` — as a
    human-readable multi-line string."""
    return DEBUG_DECISION_FMT.format(**d)
170,920 | import math
import re
import string
from collections import defaultdict
from typing import Any, Dict, Iterator, List, Match, Optional, Tuple, Union
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
class PunktTrainer(PunktBaseClass):
"""Learns parameters used in Punkt sentence boundary detection."""
    def __init__(
        self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken
    ):
        """
        :param train_text: if given, training is run on it immediately
            (``train()`` is called with ``finalize=True``); expected to be
            raw text — use :meth:`train_tokens` for pre-tokenized input
        :param verbose: passed through to :meth:`train`
        :param lang_vars: language-specific variables, forwarded to
            ``PunktBaseClass``
        :param token_cls: the augmented-token class used during training
        """
        PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls)
        self._type_fdist = FreqDist()
        """A frequency distribution giving the frequency of each
        case-normalized token type in the training data."""
        self._num_period_toks = 0
        """The number of words ending in period in the training data."""
        self._collocation_fdist = FreqDist()
        """A frequency distribution giving the frequency of all
        bigrams in the training data where the first word ends in a
        period. Bigrams are encoded as tuples of word types.
        Especially common collocations are extracted from this
        frequency distribution, and stored in
        ``_params``.``collocations <PunktParameters.collocations>``."""
        self._sent_starter_fdist = FreqDist()
        """A frequency distribution giving the frequency of all words
        that occur at the training data at the beginning of a sentence
        (after the first pass of annotation). Especially common
        sentence starters are extracted from this frequency
        distribution, and stored in ``_params.sent_starters``.
        """
        self._sentbreak_count = 0
        """The total number of sentence breaks identified in training, used for
        calculating the frequent sentence starter heuristic."""
        self._finalized = True
        """A flag as to whether the training has been finalized by finding
        collocations and sentence starters, or whether finalize_training()
        still needs to be called."""
        # Truthiness check: empty-string training text is ignored.
        if train_text:
            self.train(train_text, verbose, finalize=True)
def get_params(self):
"""
Calculates and returns parameters for sentence boundary detection as
derived from training."""
if not self._finalized:
self.finalize_training()
return self._params
# ////////////////////////////////////////////////////////////
# { Customization Variables
# ////////////////////////////////////////////////////////////
ABBREV = 0.3
"""cut-off value whether a 'token' is an abbreviation"""
IGNORE_ABBREV_PENALTY = False
"""allows the disabling of the abbreviation penalty heuristic, which
exponentially disadvantages words that are found at times without a
final period."""
ABBREV_BACKOFF = 5
"""upper cut-off for Mikheev's(2002) abbreviation detection algorithm"""
COLLOCATION = 7.88
"""minimal log-likelihood value that two tokens need to be considered
as a collocation"""
SENT_STARTER = 30
"""minimal log-likelihood value that a token requires to be considered
as a frequent sentence starter"""
INCLUDE_ALL_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word ends in a period. It may be useful in corpora where there is a lot
of variation that makes abbreviations like Mr difficult to identify."""
INCLUDE_ABBREV_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word is an abbreviation. Such collocations override the orthographic
heuristic, but not the sentence starter heuristic. This is overridden by
INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials
and ordinals are considered."""
""""""
MIN_COLLOC_FREQ = 1
"""this sets a minimum bound on the number of times a bigram needs to
appear before it can be considered a collocation, in addition to log
likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True."""
# ////////////////////////////////////////////////////////////
# { Training..
# ////////////////////////////////////////////////////////////
def train(self, text, verbose=False, finalize=True):
"""
Collects training data from a given text. If finalize is True, it
will determine all the parameters for sentence boundary detection. If
not, this will be delayed until get_params() or finalize_training() is
called. If verbose is True, abbreviations found will be listed.
"""
# Break the text into tokens; record which token indices correspond to
# line starts and paragraph starts; and determine their types.
self._train_tokens(self._tokenize_words(text), verbose)
if finalize:
self.finalize_training(verbose)
def train_tokens(self, tokens, verbose=False, finalize=True):
"""
Collects training data from a given list of tokens.
"""
self._train_tokens((self._Token(t) for t in tokens), verbose)
if finalize:
self.finalize_training(verbose)
    def _train_tokens(self, tokens, verbose):
        """Accumulate statistics from *tokens* (an iterable of augmented
        tokens) into the training frequency distributions, and record
        abbreviations that can already be decided into ``self._params``.

        :param tokens: augmented tokens, e.g. from ``_tokenize_words``
        :param verbose: if True, print abbreviations as they are added or
            removed, and rare abbreviations as they are found
        """
        # Any new data invalidates previously computed collocations and
        # sentence starters until finalize_training() runs again.
        self._finalized = False
        # Ensure tokens are a list
        tokens = list(tokens)
        # Find the frequency of each case-normalized type.  (Don't
        # strip off final periods.)  Also keep track of the number of
        # tokens that end in periods.
        for aug_tok in tokens:
            self._type_fdist[aug_tok.type] += 1
            if aug_tok.period_final:
                self._num_period_toks += 1
        # Look for new abbreviations, and for types that no longer are
        unique_types = self._unique_types(tokens)
        for abbr, score, is_add in self._reclassify_abbrev_types(unique_types):
            if score >= self.ABBREV:
                if is_add:
                    self._params.abbrev_types.add(abbr)
                    if verbose:
                        print(f" Abbreviation: [{score:6.4f}] {abbr}")
            else:
                if not is_add:
                    self._params.abbrev_types.remove(abbr)
                    if verbose:
                        print(f" Removed abbreviation: [{score:6.4f}] {abbr}")
        # Make a preliminary pass through the document, marking likely
        # sentence breaks, abbreviations, and ellipsis tokens.
        tokens = list(self._annotate_first_pass(tokens))
        # Check what contexts each word type can appear in, given the
        # case of its first letter.
        self._get_orthography_data(tokens)
        # We need total number of sentence breaks to find sentence starters
        self._sentbreak_count += self._get_sentbreak_count(tokens)
        # The remaining heuristics relate to pairs of tokens where the first
        # ends in a period.
        for aug_tok1, aug_tok2 in _pair_iter(tokens):
            if not aug_tok1.period_final or not aug_tok2:
                continue
            # Is the first token a rare abbreviation?
            if self._is_rare_abbrev_type(aug_tok1, aug_tok2):
                self._params.abbrev_types.add(aug_tok1.type_no_period)
                if verbose:
                    print(" Rare Abbrev: %s" % aug_tok1.type)
            # Does second token have a high likelihood of starting a sentence?
            if self._is_potential_sent_starter(aug_tok2, aug_tok1):
                self._sent_starter_fdist[aug_tok2.type] += 1
            # Is this bigram a potential collocation?
            if self._is_potential_collocation(aug_tok1, aug_tok2):
                self._collocation_fdist[
                    (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod)
                ] += 1
def _unique_types(self, tokens):
return {aug_tok.type for aug_tok in tokens}
def finalize_training(self, verbose=False):
"""
Uses data that has been gathered in training to determine likely
collocations and sentence starters.
"""
self._params.clear_sent_starters()
for typ, log_likelihood in self._find_sent_starters():
self._params.sent_starters.add(typ)
if verbose:
print(f" Sent Starter: [{log_likelihood:6.4f}] {typ!r}")
self._params.clear_collocations()
for (typ1, typ2), log_likelihood in self._find_collocations():
self._params.collocations.add((typ1, typ2))
if verbose:
print(f" Collocation: [{log_likelihood:6.4f}] {typ1!r}+{typ2!r}")
self._finalized = True
# ////////////////////////////////////////////////////////////
# { Overhead reduction
# ////////////////////////////////////////////////////////////
    def freq_threshold(
        self, ortho_thresh=2, type_thresh=2, colloc_thres=2, sentstart_thresh=2
    ):
        """
        Allows memory use to be reduced after much training by removing data
        about rare tokens that are unlikely to have a statistical effect with
        further training.  Entries occurring above the given thresholds will be
        retained.

        :param ortho_thresh: minimum type frequency for orthographic
            context data to be kept
        :param type_thresh: minimum count kept in the type distribution
        :param colloc_thres: minimum count kept in the collocation
            bigram distribution
        :param sentstart_thresh: minimum count kept in the sentence
            starter distribution
        """
        if ortho_thresh > 1:
            # Rebuild the orthographic context table, keeping data only
            # for types that are frequent enough to matter.
            old_oc = self._params.ortho_context
            self._params.clear_ortho_context()
            for tok in self._type_fdist:
                count = self._type_fdist[tok]
                if count >= ortho_thresh:
                    self._params.ortho_context[tok] = old_oc[tok]
        self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh)
        self._collocation_fdist = self._freq_threshold(
            self._collocation_fdist, colloc_thres
        )
        self._sent_starter_fdist = self._freq_threshold(
            self._sent_starter_fdist, sentstart_thresh
        )
def _freq_threshold(self, fdist, threshold):
"""
Returns a FreqDist containing only data with counts below a given
threshold, as well as a mapping (None -> count_removed).
"""
# We assume that there is more data below the threshold than above it
# and so create a new FreqDist rather than working in place.
res = FreqDist()
num_removed = 0
for tok in fdist:
count = fdist[tok]
if count < threshold:
num_removed += 1
else:
res[tok] += count
res[None] += num_removed
return res
# ////////////////////////////////////////////////////////////
# { Orthographic data
# ////////////////////////////////////////////////////////////
    def _get_orthography_data(self, tokens):
        """
        Collect information about whether each token type occurs
        with different case patterns (i) overall, (ii) at
        sentence-initial positions, and (iii) at sentence-internal
        positions.

        Runs a small state machine over *tokens* whose state (``context``)
        tracks whether the current token sits at a sentence-initial,
        sentence-internal, or undecidable position.
        """
        # 'initial' or 'internal' or 'unknown'
        context = "internal"
        tokens = list(tokens)
        for aug_tok in tokens:
            # If we encounter a paragraph break, then it's a good sign
            # that it's a sentence break.  But err on the side of
            # caution (by not positing a sentence break) if we just
            # saw an abbreviation.
            if aug_tok.parastart and context != "unknown":
                context = "initial"
            # If we're at the beginning of a line, then we can't decide
            # between 'internal' and 'initial'.
            if aug_tok.linestart and context == "internal":
                context = "unknown"
            # Find the case-normalized type of the token.  If it's a
            # sentence-final token, strip off the period.
            typ = aug_tok.type_no_sentperiod
            # Update the orthographic context table.
            # NOTE(review): _ORTHO_MAP is defined elsewhere in this module;
            # a (context, case) pair missing from it contributes no flag.
            flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0)
            if flag:
                self._params.add_ortho_context(typ, flag)
            # Decide whether the next word is at a sentence boundary.
            if aug_tok.sentbreak:
                if not (aug_tok.is_number or aug_tok.is_initial):
                    context = "initial"
                else:
                    context = "unknown"
            elif aug_tok.ellipsis or aug_tok.abbr:
                context = "unknown"
            else:
                context = "internal"
# ////////////////////////////////////////////////////////////
# { Abbreviations
# ////////////////////////////////////////////////////////////
def _reclassify_abbrev_types(self, types):
    """
    (Re)classifies each given token if
      - it is period-final and not a known abbreviation; or
      - it is not period-final and is otherwise a known abbreviation
    by checking whether its previous classification still holds according
    to the heuristics of section 3.

    Yields triples (abbr, score, is_add) where abbr is the type in question,
    score is its log-likelihood with penalties applied, and is_add specifies
    whether the present type is a candidate for inclusion or exclusion as an
    abbreviation, such that:
      - (is_add and score >= 0.3)    suggests a new abbreviation; and
      - (not is_add and score < 0.3) suggests excluding an abbreviation.

    :param types: iterable of case-normalized token types to (re)examine.
    """
    # (While one could recalculate abbreviations from all .-final tokens at
    # every iteration, in cases requiring efficiency, the number of tokens
    # in the present training document will be much less.)
    for typ in types:
        # Check some basic conditions, to rule out words that are
        # clearly not abbrev_types.
        if not _re_non_punct.search(typ) or typ == "##number##":
            continue
        if typ.endswith("."):
            if typ in self._params.abbrev_types:
                continue
            # Period-final and unknown: candidate for *adding*.
            # Strip the period so scoring sees the bare word.
            typ = typ[:-1]
            is_add = True
        else:
            if typ not in self._params.abbrev_types:
                continue
            # Known abbreviation seen without its period: candidate
            # for *removal*.
            is_add = False
        # Count how many periods & nonperiods are in the
        # candidate.
        num_periods = typ.count(".") + 1
        num_nonperiods = len(typ) - num_periods + 1
        # Let <a> be the candidate without the period, and <b>
        # be the period.  Find a log likelihood ratio that
        # indicates whether <ab> occurs as a single unit (high
        # value of log_likelihood), or as two independent units <a> and
        # <b> (low value of log_likelihood).
        count_with_period = self._type_fdist[typ + "."]
        count_without_period = self._type_fdist[typ]
        log_likelihood = self._dunning_log_likelihood(
            count_with_period + count_without_period,
            self._num_period_toks,
            count_with_period,
            self._type_fdist.N(),
        )
        # Apply three scaling factors to 'tweak' the basic log
        # likelihood ratio:
        #   F_length: long word -> less likely to be an abbrev
        #   F_periods: more periods -> more likely to be an abbrev
        #   F_penalty: penalize occurrences w/o a period
        f_length = math.exp(-num_nonperiods)
        f_periods = num_periods
        # When IGNORE_ABBREV_PENALTY is set, int(True) == 1 makes the
        # penalty factor a no-op.
        f_penalty = int(self.IGNORE_ABBREV_PENALTY) or math.pow(
            num_nonperiods, -count_without_period
        )
        score = log_likelihood * f_length * f_periods * f_penalty
        yield typ, score, is_add
def find_abbrev_types(self):
    """
    Recompute the abbreviation set from scratch using the current type
    frequencies; any previously learned abbreviations are discarded
    first.  Abbreviations otherwise found as "rare" are not included.
    """
    self._params.clear_abbrevs()
    period_final = (typ for typ in self._type_fdist if typ and typ.endswith("."))
    # Add incrementally: the reclassifier consults abbrev_types while
    # iterating, so additions must be visible as they happen.
    for candidate, score, _is_add in self._reclassify_abbrev_types(period_final):
        if score < self.ABBREV:
            continue
        self._params.abbrev_types.add(candidate)
# This function combines the work done by the original code's
# functions `count_orthography_context`, `get_orthography_count`,
# and `get_rare_abbreviations`.
def _is_rare_abbrev_type(self, cur_tok, next_tok):
    """
    Decide whether ``cur_tok`` should be counted as a rare abbreviation.

    A word type qualifies when:
      - it is not already marked as an abbreviation,
      - it occurs fewer than ABBREV_BACKOFF times, and
      - it is either followed by sentence-internal punctuation, or by a
        lower-case word that sometimes appears capitalized but never
        capitalized sentence-internally.

    Returns True when the heuristics fire, False when the token is
    disqualified early, and None (implicit) otherwise.
    """
    # Only period-final tokens currently tagged as sentence breaks, and
    # not already flagged as abbreviations, are candidates.
    if cur_tok.abbr or not cur_tok.sentbreak:
        return False

    # Case-normalized type with any sentence-final period stripped.
    candidate = cur_tok.type_no_sentperiod

    # Skip types already categorized as abbreviations, and types that
    # are too frequent to be "rare".
    occurrences = self._type_fdist[candidate] + self._type_fdist[candidate[:-1]]
    if (
        candidate in self._params.abbrev_types
        or occurrences >= self.ABBREV_BACKOFF
    ):
        return False

    # Heuristic 1: the next token is a sentence-internal punctuation
    # mark.  [XX] :1 or check the whole thing??
    if next_tok.tok[:1] in self._lang_vars.internal_punctuation:
        return True

    # Heuristic 2: the next token (i) starts lower-case, (ii) has been
    # seen capitalized, and (iii) has never been seen capitalized
    # sentence-internally.  [xx] should the check for (ii) be modified??
    if next_tok.first_lower:
        follower = next_tok.type_no_sentperiod
        follower_ctx = self._params.ortho_context[follower]
        if (follower_ctx & _ORTHO_BEG_UC) and not (follower_ctx & _ORTHO_MID_UC):
            return True
# ////////////////////////////////////////////////////////////
# { Log Likelihoods
# ////////////////////////////////////////////////////////////
# helper for _reclassify_abbrev_types:
@staticmethod
def _dunning_log_likelihood(count_a, count_b, count_ab, N):
    """
    Calculate the modified Dunning log-likelihood ratio score for an
    abbreviation candidate.  The details of how this works are
    available in the paper.

    Fix: the function takes no ``self`` but is invoked as
    ``self._dunning_log_likelihood(a, b, c, d)`` with four data
    arguments, so it must be a staticmethod (otherwise the bound call
    would pass five arguments and raise TypeError).

    :param count_a: occurrences of the candidate word, with or without
        a trailing period.
    :param count_b: total number of period tokens.
    :param count_ab: occurrences of the word followed by a period.
    :param N: total number of tokens.
    :return: -2 * log-likelihood-ratio of the null hypothesis (period
        follows the word at the background rate) against the
        alternative (period almost always, p=0.99, follows the word).
    """
    # Null hypothesis: the period follows the word at the corpus-wide
    # background rate.
    p1 = count_b / N
    # Alternative hypothesis: the word is an abbreviation, so a period
    # follows it almost always.
    p2 = 0.99

    null_hypo = count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1)
    alt_hypo = count_ab * math.log(p2) + (count_a - count_ab) * math.log(1.0 - p2)

    likelihood = null_hypo - alt_hypo

    return -2.0 * likelihood
@staticmethod
def _col_log_likelihood(count_a, count_b, count_ab, N):
    """
    Compute the (unmodified) Dunning log-likelihood estimate described
    in algorithms 6 and 7 of the original paper, unlike
    ``_dunning_log_likelihood`` which uses modified values.

    Fix: the function takes no ``self`` but is invoked as
    ``self._col_log_likelihood(a, b, c, d)`` with four data arguments,
    so it must be a staticmethod.

    :param count_a: occurrences of the first word of the pair.
    :param count_b: occurrences of the second word of the pair.
    :param count_ab: occurrences of the pair <a><b>.
    :param N: total number of tokens.
    :return: -2 * log-likelihood-ratio for the collocation.
    """
    p = count_b / N
    p1 = count_ab / count_a
    try:
        p2 = (count_b - count_ab) / (N - count_a)
    except ZeroDivisionError:
        # count_a == N: every token is <a>, so <b> never occurs
        # without <a>; treat the conditional probability as 1.
        p2 = 1

    # Each summand is guarded: log() raises ValueError on 0 or a
    # negative argument, in which case that term contributes nothing.
    try:
        summand1 = count_ab * math.log(p) + (count_a - count_ab) * math.log(1.0 - p)
    except ValueError:
        summand1 = 0

    try:
        summand2 = (count_b - count_ab) * math.log(p) + (
            N - count_a - count_b + count_ab
        ) * math.log(1.0 - p)
    except ValueError:
        summand2 = 0

    if count_a == count_ab or p1 <= 0 or p1 >= 1:
        summand3 = 0
    else:
        summand3 = count_ab * math.log(p1) + (count_a - count_ab) * math.log(
            1.0 - p1
        )

    if count_b == count_ab or p2 <= 0 or p2 >= 1:
        summand4 = 0
    else:
        summand4 = (count_b - count_ab) * math.log(p2) + (
            N - count_a - count_b + count_ab
        ) * math.log(1.0 - p2)

    likelihood = summand1 + summand2 - summand3 - summand4

    return -2.0 * likelihood
# ////////////////////////////////////////////////////////////
# { Collocation Finder
# ////////////////////////////////////////////////////////////
def _is_potential_collocation(self, aug_tok1, aug_tok2):
    """
    Return True if the token pair may form a collocation worth scoring
    with log-likelihood statistics.
    """
    # Both members must be word-like (non-punctuation).
    if not (aug_tok1.is_non_punct and aug_tok2.is_non_punct):
        return False
    # Accept every pair when configured to, pairs led by an
    # abbreviation when configured to, and pairs whose first member is
    # a number or initial currently tagged as a sentence break.
    if self.INCLUDE_ALL_COLLOCS:
        return True
    if self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr:
        return True
    return aug_tok1.sentbreak and (aug_tok1.is_number or aug_tok1.is_initial)
def _find_collocations(self):
    """
    Generates likely collocations and their log-likelihood.

    Yields ((type1, type2), log_likelihood) pairs for bigrams whose
    co-occurrence is statistically significant per section 4.1.2.
    """
    for types in self._collocation_fdist:
        try:
            typ1, typ2 = types
        except TypeError:
            # types may be None after calling freq_threshold()
            continue
        # Collocations ending in a frequent sentence starter are
        # excluded (the starter explains the adjacency by itself).
        if typ2 in self._params.sent_starters:
            continue
        col_count = self._collocation_fdist[types]
        # Count each type with and without a trailing period.
        typ1_count = self._type_fdist[typ1] + self._type_fdist[typ1 + "."]
        typ2_count = self._type_fdist[typ2] + self._type_fdist[typ2 + "."]
        if (
            typ1_count > 1
            and typ2_count > 1
            and self.MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count)
        ):
            log_likelihood = self._col_log_likelihood(
                typ1_count, typ2_count, col_count, self._type_fdist.N()
            )
            # Filter out the not-so-collocative: require significance
            # and that the pair co-occurs more often than chance.
            if log_likelihood >= self.COLLOCATION and (
                self._type_fdist.N() / typ1_count > typ2_count / col_count
            ):
                yield (typ1, typ2), log_likelihood
# ////////////////////////////////////////////////////////////
# { Sentence-Starter Finder
# ////////////////////////////////////////////////////////////
def _is_potential_sent_starter(self, cur_tok, prev_tok):
    """
    Return True if ``cur_tok`` plausibly begins a sentence, given the
    token that precedes it.

    A token qualifies when (i) the preceding token carries a sentence
    break that is not a potential ordinal number or initial, and
    (ii) the token itself is alphabetic.
    """
    if not prev_tok.sentbreak:
        return False
    if prev_tok.is_number or prev_tok.is_initial:
        return False
    return cur_tok.is_alpha
def _find_sent_starters(self):
    """
    Uses collocation heuristics for each candidate token to
    determine if it frequently starts sentences.

    Yields (type, log_likelihood) pairs for types whose appearance
    immediately after a sentence break is statistically significant.
    """
    for typ in self._sent_starter_fdist:
        if not typ:
            continue
        typ_at_break_count = self._sent_starter_fdist[typ]
        # Count the type with and without a trailing period.
        typ_count = self._type_fdist[typ] + self._type_fdist[typ + "."]
        if typ_count < typ_at_break_count:
            # needed after freq_threshold
            continue
        log_likelihood = self._col_log_likelihood(
            self._sentbreak_count,
            typ_count,
            typ_at_break_count,
            self._type_fdist.N(),
        )
        # Require both statistical significance and that the type
        # follows breaks more often than chance would predict.
        if (
            log_likelihood >= self.SENT_STARTER
            and self._type_fdist.N() / self._sentbreak_count
            > typ_count / typ_at_break_count
        ):
            yield typ, log_likelihood
def _get_sentbreak_count(self, tokens):
    """
    Return the number of sentence breaks marked in ``tokens``
    (an iterable of augmented tokens).
    """
    total = 0
    for aug_tok in tokens:
        if aug_tok.sentbreak:
            total += 1
    return total
class PunktSentenceTokenizer(PunktBaseClass, TokenizerI):
"""
A sentence tokenizer which uses an unsupervised algorithm to build
a model for abbreviation words, collocations, and words that start
sentences; and then uses that model to find sentence boundaries.
This approach has been shown to work well for many European
languages.
"""
def __init__(
    self, train_text=None, verbose=False, lang_vars=None, token_cls=PunktToken
):
    """
    :param train_text: either the sole training text for this sentence
        boundary detector, or a pre-built parameters object
        (e.g. PunktParameters) to use directly.
    :param verbose: if True, training reports progress information.
    :param lang_vars: optional language-variables object overriding
        language-specific behaviour.
    :param token_cls: the class used to wrap word tokens.
    """
    PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls)
    # Only train when text/parameters were actually supplied.
    if train_text:
        self._params = self.train(train_text, verbose)
def train(self, train_text, verbose=False):
    """
    Derive tokenizer parameters from ``train_text``.

    If ``train_text`` is not a string it is assumed to already be a
    parameters object and is returned unchanged.  Repeated calls
    destroy previous parameters; for incremental training, instantiate
    a separate PunktTrainer.
    """
    if isinstance(train_text, str):
        trainer = PunktTrainer(
            train_text, lang_vars=self._lang_vars, token_cls=self._Token
        )
        return trainer.get_params()
    # Already-built parameters: pass through untouched.
    return train_text
# ////////////////////////////////////////////////////////////
# { Tokenization
# ////////////////////////////////////////////////////////////
def tokenize(self, text: str, realign_boundaries: bool = True) -> List[str]:
    """
    Return a list of the sentences found in ``text``.
    """
    sentences = self.sentences_from_text(text, realign_boundaries)
    return list(sentences)
def debug_decisions(self, text: str) -> Iterator[Dict[str, Any]]:
    """
    Classifies candidate periods as sentence breaks, yielding a dict for
    each that may be used to understand why the decision was made.

    See format_debug_decision() to help make this output readable.
    """
    for match, decision_text in self._match_potential_end_contexts(text):
        tokens = self._tokenize_words(decision_text)
        tokens = list(self._annotate_first_pass(tokens))
        # Drop leading tokens until the first one that actually ends
        # with a sentence-ending character.
        while tokens and not tokens[0].tok.endswith(self._lang_vars.sent_end_chars):
            tokens.pop(0)
        yield {
            "period_index": match.end() - 1,
            "text": decision_text,
            "type1": tokens[0].type,
            "type2": tokens[1].type,
            "type1_in_abbrs": bool(tokens[0].abbr),
            "type1_is_initial": bool(tokens[0].is_initial),
            "type2_is_sent_starter": tokens[1].type_no_sentperiod
            in self._params.sent_starters,
            "type2_ortho_heuristic": self._ortho_heuristic(tokens[1]),
            "type2_ortho_contexts": set(
                self._params._debug_ortho_context(tokens[1].type_no_sentperiod)
            ),
            "collocation": (
                tokens[0].type_no_sentperiod,
                tokens[1].type_no_sentperiod,
            )
            in self._params.collocations,
            # _second_pass_annotation returns None when no rule fired,
            # hence the default-reason fallback.
            "reason": self._second_pass_annotation(tokens[0], tokens[1])
            or REASON_DEFAULT_DECISION,
            "break_decision": tokens[0].sentbreak,
        }
def span_tokenize(
    self, text: str, realign_boundaries: bool = True
) -> Iterator[Tuple[int, int]]:
    """
    Yield (start, end) character-offset spans of the sentences
    in ``text``.
    """
    spans = self._slices_from_text(text)
    if realign_boundaries:
        spans = self._realign_boundaries(text, spans)
    yield from ((span.start, span.stop) for span in spans)
def sentences_from_text(
    self, text: str, realign_boundaries: bool = True
) -> List[str]:
    """
    Return the sentences of ``text``, testing only candidate sentence
    breaks.  When ``realign_boundaries`` is True, closing punctuation
    that follows a period is kept with the preceding sentence.
    """
    spans = self.span_tokenize(text, realign_boundaries)
    return [text[start:stop] for start, stop in spans]
def _get_last_whitespace_index(self, text: str) -> int:
    """
    Return the index of the *last* whitespace character (space, newline,
    tab, carriage return, ...) in ``text``, or 0 if there is none.
    """
    # Scan from the right; the first hit is the last whitespace.
    for offset, ch in enumerate(reversed(text)):
        if ch in string.whitespace:
            return len(text) - 1 - offset
    return 0
def _match_potential_end_contexts(self, text: str) -> Iterator[Tuple[Match, str]]:
    """
    Given a text, find the matches of potential sentence breaks,
    alongside the contexts surrounding these sentence breaks.

    Since the fix for the ReDOS discovered in issue #2866, we no longer match
    the word before a potential end of sentence token. Instead, we use a separate
    regex for this. As a consequence, `finditer`'s desire to find non-overlapping
    matches no longer aids us in finding the single longest match.
    Where previously, we could use::

        >>> pst = PunktSentenceTokenizer()
        >>> text = "Very bad acting!!! I promise."
        >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +SKIP
        [<re.Match object; span=(9, 18), match='acting!!!'>]

    Now we have to find the word before (i.e. 'acting') separately, and `finditer`
    returns::

        >>> pst = PunktSentenceTokenizer()
        >>> text = "Very bad acting!!! I promise."
        >>> list(pst._lang_vars.period_context_re().finditer(text)) # doctest: +NORMALIZE_WHITESPACE
        [<re.Match object; span=(15, 16), match='!'>,
        <re.Match object; span=(16, 17), match='!'>,
        <re.Match object; span=(17, 18), match='!'>]

    So, we need to find the word before the match from right to left, and then manually remove
    the overlaps. That is what this method does::

        >>> pst = PunktSentenceTokenizer()
        >>> text = "Very bad acting!!! I promise."
        >>> list(pst._match_potential_end_contexts(text))
        [(<re.Match object; span=(17, 18), match='!'>, 'acting!!! I')]

    :param text: String of one or more sentences
    :type text: str
    :return: Generator of match-context tuples.
    :rtype: Iterator[Tuple[Match, str]]
    """
    previous_slice = slice(0, 0)
    previous_match = None
    for match in self._lang_vars.period_context_re().finditer(text):
        # Get the slice of the previous word
        before_text = text[previous_slice.stop : match.start()]
        index_after_last_space = self._get_last_whitespace_index(before_text)
        if index_after_last_space:
            # + 1 to exclude the space itself
            index_after_last_space += previous_slice.stop + 1
        else:
            index_after_last_space = previous_slice.start
        prev_word_slice = slice(index_after_last_space, match.start())
        # If the previous slice does not overlap with this slice, then
        # we can yield the previous match and slice. If there is an overlap,
        # then we do not yield the previous match and slice.
        if previous_match and previous_slice.stop <= prev_word_slice.start:
            # The context handed downstream is
            # "<previous word><match><after_tok>".
            yield (
                previous_match,
                text[previous_slice]
                + previous_match.group()
                + previous_match.group("after_tok"),
            )
        previous_match = match
        previous_slice = prev_word_slice
    # Yield the last match and context, if it exists
    if previous_match:
        yield (
            previous_match,
            text[previous_slice]
            + previous_match.group()
            + previous_match.group("after_tok"),
        )
def _slices_from_text(self, text: str) -> Iterator[slice]:
    """
    Yield one slice per detected sentence in ``text``, without any
    boundary realignment.  The final slice always covers the remaining
    text up to (but excluding) trailing whitespace.
    """
    last_break = 0
    for match, context in self._match_potential_end_contexts(text):
        if self.text_contains_sentbreak(context):
            yield slice(last_break, match.end())
            if match.group("next_tok"):
                # next sentence starts after whitespace
                last_break = match.start("next_tok")
            else:
                # next sentence starts at following punctuation
                last_break = match.end()
    # The last sentence should not contain trailing whitespace.
    yield slice(last_break, len(text.rstrip()))
def _realign_boundaries(
    self, text: str, slices: Iterator[slice]
) -> Iterator[slice]:
    """
    Attempts to realign punctuation that falls after the period but
    should otherwise be included in the same sentence.

    For example: "(Sent1.) Sent2." will otherwise be split as::

        ["(Sent1.", ") Sent2."].

    This method will produce::

        ["(Sent1.)", "Sent2."].
    """
    realign = 0
    for sentence1, sentence2 in _pair_iter(slices):
        # Skip over any punctuation that was appended to the previous
        # sentence on the last iteration.
        sentence1 = slice(sentence1.start + realign, sentence1.stop)
        if not sentence2:
            # Final sentence: emit it if non-empty.
            if text[sentence1]:
                yield sentence1
            continue
        m = self._lang_vars.re_boundary_realignment.match(text[sentence2])
        if m:
            # Move the trailing closing punctuation of sentence2 into
            # sentence1, and remember how much to skip next iteration.
            yield slice(sentence1.start, sentence2.start + len(m.group(0).rstrip()))
            realign = m.end()
        else:
            realign = 0
            if text[sentence1]:
                yield sentence1
def text_contains_sentbreak(self, text: str) -> bool:
    """
    Return True if ``text`` contains a sentence break — ignoring a
    break carried by the very last token.
    """
    pending = False  # break seen on the previous (non-final) token?
    for aug_tok in self._annotate_tokens(self._tokenize_words(text)):
        if pending:
            return True
        if aug_tok.sentbreak:
            pending = True
    return False
def sentences_from_text_legacy(self, text: str) -> Iterator[str]:
    """
    Generate the sentences in ``text``, annotating *all* tokens rather
    than only those with possible sentence breaks.  Should produce the
    same results as ``sentences_from_text``.
    """
    word_tokens = self._tokenize_words(text)
    annotated = self._annotate_tokens(word_tokens)
    return self._build_sentence_list(text, annotated)
def sentences_from_tokens(
    self, tokens: Iterator[PunktToken]
) -> Iterator[PunktToken]:
    """
    Given a sequence of tokens, generate lists of token strings, one
    list per detected sentence.
    """
    annotated = iter(self._annotate_tokens(self._Token(t) for t in tokens))
    current = []
    for aug_tok in annotated:
        current.append(aug_tok.tok)
        if aug_tok.sentbreak:
            yield current
            current = []
    # Flush any trailing tokens that lacked a final sentence break.
    if current:
        yield current
def _annotate_tokens(self, tokens: Iterator[PunktToken]) -> Iterator[PunktToken]:
    """
    Given tokens augmented with line-start and paragraph-start markers,
    return an iterator over those tokens with full annotation,
    including predicted sentence breaks.
    """
    # First pass: type-based marking of likely sentence breaks,
    # abbreviations, and ellipsis tokens.
    first_pass = self._annotate_first_pass(tokens)
    # Second pass: token-context heuristics refine those preliminary
    # decisions.
    return self._annotate_second_pass(first_pass)
def _build_sentence_list(
    self, text: str, tokens: Iterator[PunktToken]
) -> Iterator[str]:
    """
    Given the original text and the list of augmented word tokens,
    construct and return a tokenized list of sentence strings.

    Yields each sentence as it is completed (a generator).
    """
    # Most of the work here is making sure that we put the right
    # pieces of whitespace back in all the right places.
    # Our position in the source text, used to keep track of which
    # whitespace to add:
    pos = 0
    # A regular expression that finds pieces of whitespace:
    white_space_regexp = re.compile(r"\s*")
    sentence = ""
    for aug_tok in tokens:
        tok = aug_tok.tok
        # Find the whitespace before this token, and update pos.
        white_space = white_space_regexp.match(text, pos).group()
        pos += len(white_space)
        # Some of the rules used by the punkt word tokenizer
        # strip whitespace out of the text, resulting in tokens
        # that contain whitespace in the source text. If our
        # token doesn't match, see if adding whitespace helps.
        # If so, then use the version with whitespace.
        if text[pos : pos + len(tok)] != tok:
            pat = r"\s*".join(re.escape(c) for c in tok)
            m = re.compile(pat).match(text, pos)
            if m:
                tok = m.group()
        # Move our position pointer to the end of the token.
        # (The assert enforces that the token text was located in the
        # source; a mismatch here indicates tokenizer/source drift.)
        assert text[pos : pos + len(tok)] == tok
        pos += len(tok)
        # Add this token. If it's not at the beginning of the
        # sentence, then include any whitespace that separated it
        # from the previous token.
        if sentence:
            sentence += white_space
        sentence += tok
        # If we're at a sentence break, then start a new sentence.
        if aug_tok.sentbreak:
            yield sentence
            sentence = ""
    # If the last sentence is empty, discard it.
    if sentence:
        yield sentence
# [XX] TESTING
def dump(self, tokens: Iterator[PunktToken]) -> None:
    """
    Debug helper: write the annotated token stream to /tmp/punkt.new,
    restoring paragraph breaks (blank line) and line breaks.
    """
    print("writing to /tmp/punkt.new...")
    with open("/tmp/punkt.new", "w") as outfile:
        for aug_tok in tokens:
            if aug_tok.parastart:
                separator = "\n\n"
            elif aug_tok.linestart:
                separator = "\n"
            else:
                separator = " "
            outfile.write(separator + str(aug_tok))
# ////////////////////////////////////////////////////////////
# { Customization Variables
# ////////////////////////////////////////////////////////////
PUNCTUATION = tuple(";:,.!?")
# ////////////////////////////////////////////////////////////
# { Annotation Procedures
# ////////////////////////////////////////////////////////////
def _annotate_second_pass(
    self, tokens: Iterator[PunktToken]
) -> Iterator[PunktToken]:
    """
    Perform the token-based classification pass (section 4) over the
    given tokens, applying the orthographic heuristic (4.1.1), the
    collocation heuristic (4.1.2) and the frequent sentence starter
    heuristic (4.1.3).
    """
    for current, following in _pair_iter(tokens):
        # Annotation mutates ``current`` in place; the returned
        # debugging reason string is not needed here.
        self._second_pass_annotation(current, following)
        yield current
def _second_pass_annotation(
    self, aug_tok1: PunktToken, aug_tok2: Optional[PunktToken]
) -> Optional[str]:
    """
    Performs token-based classification over a pair of contiguous tokens
    updating the first.

    :return: a REASON_* constant describing which heuristic fired, or
        None when no reclassification applied.
    """
    # Is it the last token? We can't do anything then.
    if not aug_tok2:
        return
    if not aug_tok1.period_final:
        # We only care about words ending in periods.
        return
    typ = aug_tok1.type_no_period
    next_typ = aug_tok2.type_no_sentperiod
    tok_is_initial = aug_tok1.is_initial
    # [4.1.2. Collocation Heuristic] If there's a
    # collocation between the word before and after the
    # period, then label tok as an abbreviation and NOT
    # a sentence break. Note that collocations with
    # frequent sentence starters as their second word are
    # excluded in training.
    if (typ, next_typ) in self._params.collocations:
        aug_tok1.sentbreak = False
        aug_tok1.abbr = True
        return REASON_KNOWN_COLLOCATION
    # [4.2. Token-Based Reclassification of Abbreviations] If
    # the token is an abbreviation or an ellipsis, then decide
    # whether we should *also* classify it as a sentbreak.
    if (aug_tok1.abbr or aug_tok1.ellipsis) and (not tok_is_initial):
        # [4.1.1. Orthographic Heuristic] Check if there's
        # orthographic evidence about whether the next word
        # starts a sentence or not.
        # NB: _ortho_heuristic is tri-state (True / False / "unknown"),
        # so the comparison with == True below is deliberate — a plain
        # truthiness test would also accept "unknown".
        is_sent_starter = self._ortho_heuristic(aug_tok2)
        if is_sent_starter == True:
            aug_tok1.sentbreak = True
            return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC
        # [4.1.3. Frequent Sentence Starter Heuristic] If the
        # next word is capitalized, and is a member of the
        # frequent-sentence-starters list, then label tok as a
        # sentence break.
        if aug_tok2.first_upper and next_typ in self._params.sent_starters:
            aug_tok1.sentbreak = True
            return REASON_ABBR_WITH_SENTENCE_STARTER
    # [4.3. Token-Based Detection of Initials and Ordinals]
    # Check if any initials or ordinals tokens that are marked
    # as sentbreaks should be reclassified as abbreviations.
    if tok_is_initial or typ == "##number##":
        # [4.1.1. Orthographic Heuristic] Check if there's
        # orthographic evidence about whether the next word
        # starts a sentence or not.
        is_sent_starter = self._ortho_heuristic(aug_tok2)
        if is_sent_starter == False:
            aug_tok1.sentbreak = False
            aug_tok1.abbr = True
            if tok_is_initial:
                return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC
            return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC
        # Special heuristic for initials: if orthographic
        # heuristic is unknown, and next word is always
        # capitalized, then mark as abbrev (eg: J. Bach).
        if (
            is_sent_starter == "unknown"
            and tok_is_initial
            and aug_tok2.first_upper
            and not (self._params.ortho_context[next_typ] & _ORTHO_LC)
        ):
            aug_tok1.sentbreak = False
            aug_tok1.abbr = True
            return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC
    return
def _ortho_heuristic(self, aug_tok: PunktToken) -> Union[bool, str]:
    """
    Decide whether ``aug_tok`` is the first token in a sentence, based
    on orthographic (capitalization) evidence gathered in training.

    :return: True or False when the evidence is decisive, otherwise the
        string "unknown".
    """
    # Sentences don't start with punctuation marks.
    if aug_tok.tok in self.PUNCTUATION:
        return False

    flags = self._params.ortho_context[aug_tok.type_no_sentperiod]

    # Capitalized here, seen lower-cased at least once, and never seen
    # capitalized sentence-internally -> sentence starter.
    upper_evidence = (
        aug_tok.first_upper
        and flags & _ORTHO_LC
        and not flags & _ORTHO_MID_UC
    )
    if upper_evidence:
        return True

    # Lower-cased here, and either seen capitalized somewhere, or never
    # seen lower-cased sentence-initially -> not a sentence starter.
    lower_evidence = aug_tok.first_lower and (
        flags & _ORTHO_UC or not flags & _ORTHO_BEG_LC
    )
    if lower_evidence:
        return False

    # Evidence is inconclusive.
    return "unknown"
The provided code snippet includes necessary dependencies for implementing the `demo` function. Write a Python function `def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer)` to solve the following problem:
Builds a punkt model and applies it to the same text
Here is the function:
def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer):
    """Builds a punkt model and applies it to the same text"""
    # Strip carriage returns / leading indentation, and flatten each
    # sentence onto a single line for display.
    strip_ws = re.compile(r"(?:\r|^\s+)", re.MULTILINE)

    def cleanup(sentence):
        return strip_ws.sub("", sentence).replace("\n", " ")

    trainer = train_cls()
    trainer.INCLUDE_ALL_COLLOCS = True
    trainer.train(text)
    sbd = tok_cls(trainer.get_params())
    for sentence in sbd.sentences_from_text(text):
        print(cleanup(sentence))
170,921 | import math
import re
try:
import numpy
except ImportError:
pass
from nltk.tokenize.api import TokenizerI
The provided code snippet includes necessary dependencies for implementing the `smooth` function. Write a Python function `def smooth(x, window_len=11, window="flat")` to solve the following problem:
smooth the data using a window with requested size. This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the beginning and end part of the output signal. :param x: the input signal :param window_len: the dimension of the smoothing window; should be an odd integer :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing. :return: the smoothed signal example:: t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) :see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve, scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string
Here is the function:
def smooth(x, window_len=11, window="flat"):
    """Smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.

    :param x: the input signal (a 1-D numpy array)
    :param window_len: the dimension of the smoothing window; should be an odd integer
    :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
        flat window will produce a moving average smoothing.
    :return: the smoothed signal, with the same length as ``x``
    :raises ValueError: if ``x`` is not 1-D, is shorter than
        ``window_len``, or ``window`` names an unknown window type

    example::

        t=linspace(-2,2,0.1)
        x=sin(t)+randn(len(t))*0.1
        y=smooth(x)

    :see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve,
        scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead of a string
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ("flat", "hanning", "hamming", "bartlett", "blackman"):
        # Fixed error-message typo ("is on of" -> "is one of").
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
        )

    # Pad both ends with reflected copies of the signal so the
    # convolution has minimal transients at the boundaries.
    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]

    if window == "flat":  # moving average
        w = numpy.ones(window_len, "d")
    else:
        # Look the window function up on the numpy module directly;
        # avoids the previous string-built eval() call.
        w = getattr(numpy, window)(window_len)

    y = numpy.convolve(w / w.sum(), s, mode="same")
    # Trim the reflected padding so the output matches the input length.
    return y[window_len - 1 : -window_len + 1]
170,922 | import math
import re
from nltk.tokenize.api import TokenizerI
class TextTilingTokenizer(TokenizerI):
    """Tokenize a document into topical sections using the TextTiling algorithm.
    This algorithm detects subtopic shifts based on the analysis of lexical
    co-occurrence patterns.

    The process starts by tokenizing the text into pseudosentences of
    a fixed size w. Then, depending on the method used, similarity
    scores are assigned at sentence gaps. The algorithm proceeds by
    detecting the peak differences between these scores and marking
    them as boundaries. The boundaries are normalized to the closest
    paragraph break and the segmented text is returned.

    :param w: Pseudosentence size
    :type w: int
    :param k: Size (in sentences) of the block used in the block comparison method
    :type k: int
    :param similarity_method: The method used for determining similarity scores:
        `BLOCK_COMPARISON` (default) or `VOCABULARY_INTRODUCTION`.
    :type similarity_method: constant
    :param stopwords: A list of stopwords that are filtered out (defaults to NLTK's stopwords corpus)
    :type stopwords: list(str)
    :param smoothing_method: The method used for smoothing the score plot:
        `DEFAULT_SMOOTHING` (default)
    :type smoothing_method: constant
    :param smoothing_width: The width of the window used by the smoothing method
    :type smoothing_width: int
    :param smoothing_rounds: The number of smoothing passes
    :type smoothing_rounds: int
    :param cutoff_policy: The policy used to determine the number of boundaries:
        `HC` (default) or `LC`
    :type cutoff_policy: constant

    >>> from nltk.corpus import brown
    >>> tt = TextTilingTokenizer(demo_mode=True)
    >>> text = brown.raw()[:4000]
    >>> s, ss, d, b = tt.tokenize(text)
    >>> b
    [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]
    """
    def __init__(
        self,
        w=20,
        k=10,
        similarity_method=BLOCK_COMPARISON,
        stopwords=None,
        smoothing_method=DEFAULT_SMOOTHING,
        smoothing_width=2,
        smoothing_rounds=1,
        cutoff_policy=HC,
        demo_mode=False,
    ):
        if stopwords is None:
            # Lazy import: the stopwords corpus is only needed when the
            # caller did not supply an explicit stopword list.
            from nltk.corpus import stopwords
            stopwords = stopwords.words("english")
        # Store every constructor argument as a same-named instance
        # attribute (self.w, self.k, self.cutoff_policy, ...) in one shot.
        self.__dict__.update(locals())
        del self.__dict__["self"]
    def tokenize(self, text):
        """Return a tokenized copy of *text*, where each "token" represents
        a separate topic.

        In ``demo_mode`` the intermediate score series
        ``(gap_scores, smooth_scores, depth_scores, segment_boundaries)``
        are returned instead of text segments.
        """
        lowercase_text = text.lower()
        paragraph_breaks = self._mark_paragraph_breaks(text)
        text_length = len(lowercase_text)
        # Tokenization step starts here
        # Remove punctuation: keep only lowercase letters, hyphens,
        # apostrophes and whitespace so word matching below is clean.
        nopunct_text = "".join(
            c for c in lowercase_text if re.match(r"[a-z\-' \n\t]", c)
        )
        nopunct_par_breaks = self._mark_paragraph_breaks(nopunct_text)
        tokseqs = self._divide_to_tokensequences(nopunct_text)
        # The morphological stemming step mentioned in the TextTile
        # paper is not implemented. A comment in the original C
        # implementation states that it offers no benefit to the
        # process. It might be interesting to test the existing
        # stemmers though.
        # words = _stem_words(words)
        # Filter stopwords from each pseudosentence in place.
        for ts in tokseqs:
            ts.wrdindex_list = [
                wi for wi in ts.wrdindex_list if wi[0] not in self.stopwords
            ]
        token_table = self._create_token_table(tokseqs, nopunct_par_breaks)
        # End of the Tokenization step
        # Lexical score determination
        if self.similarity_method == BLOCK_COMPARISON:
            gap_scores = self._block_comparison(tokseqs, token_table)
        elif self.similarity_method == VOCABULARY_INTRODUCTION:
            raise NotImplementedError("Vocabulary introduction not implemented")
        else:
            raise ValueError(
                f"Similarity method {self.similarity_method} not recognized"
            )
        if self.smoothing_method == DEFAULT_SMOOTHING:
            smooth_scores = self._smooth_scores(gap_scores)
        else:
            raise ValueError(f"Smoothing method {self.smoothing_method} not recognized")
        # End of Lexical score Determination
        # Boundary identification
        depth_scores = self._depth_scores(smooth_scores)
        segment_boundaries = self._identify_boundaries(depth_scores)
        normalized_boundaries = self._normalize_boundaries(
            text, segment_boundaries, paragraph_breaks
        )
        # End of Boundary Identification
        # Slice the original text at the normalized boundary offsets.
        segmented_text = []
        prevb = 0
        for b in normalized_boundaries:
            if b == 0:
                continue
            segmented_text.append(text[prevb:b])
            prevb = b
        if prevb < text_length:  # append any text that may be remaining
            segmented_text.append(text[prevb:])
        if not segmented_text:
            segmented_text = [text]
        if self.demo_mode:
            return gap_scores, smooth_scores, depth_scores, segment_boundaries
        return segmented_text
    def _block_comparison(self, tokseqs, token_table):
        """Implements the block comparison method"""
        def blk_frq(tok, block):
            # Total frequency of *tok* within the token sequences whose
            # indices are listed in *block*.
            ts_occs = filter(lambda o: o[0] in block, token_table[tok].ts_occurences)
            freq = sum(tsocc[1] for tsocc in ts_occs)
            return freq
        gap_scores = []
        numgaps = len(tokseqs) - 1
        for curr_gap in range(numgaps):
            score_dividend, score_divisor_b1, score_divisor_b2 = 0.0, 0.0, 0.0
            score = 0.0
            # adjust window size for boundary conditions: near the start
            # or end of the text the block cannot be k sequences long.
            if curr_gap < self.k - 1:
                window_size = curr_gap + 1
            elif curr_gap > numgaps - self.k:
                window_size = numgaps - curr_gap
            else:
                window_size = self.k
            # b1 = block before the gap, b2 = block after the gap.
            b1 = [ts.index for ts in tokseqs[curr_gap - window_size + 1 : curr_gap + 1]]
            b2 = [ts.index for ts in tokseqs[curr_gap + 1 : curr_gap + window_size + 1]]
            for t in token_table:
                score_dividend += blk_frq(t, b1) * blk_frq(t, b2)
                score_divisor_b1 += blk_frq(t, b1) ** 2
                score_divisor_b2 += blk_frq(t, b2) ** 2
            # Cosine similarity between the two blocks' term-frequency
            # vectors; a zero-norm block leaves the score at 0.0.
            try:
                score = score_dividend / math.sqrt(score_divisor_b1 * score_divisor_b2)
            except ZeroDivisionError:
                pass  # score += 0.0
            gap_scores.append(score)
        return gap_scores
    def _smooth_scores(self, gap_scores):
        "Wraps the smooth function from the SciPy Cookbook"
        return list(
            smooth(numpy.array(gap_scores[:]), window_len=self.smoothing_width + 1)
        )
    def _mark_paragraph_breaks(self, text):
        """Identifies indented text or line breaks as the beginning of
        paragraphs"""
        # Breaks closer than MIN_PARAGRAPH characters to the previous one
        # are ignored, so tiny "paragraphs" are merged with their neighbor.
        MIN_PARAGRAPH = 100
        pattern = re.compile("[ \t\r\f\v]*\n[ \t\r\f\v]*\n[ \t\r\f\v]*")
        matches = pattern.finditer(text)
        last_break = 0
        pbreaks = [0]
        for pb in matches:
            if pb.start() - last_break < MIN_PARAGRAPH:
                continue
            else:
                pbreaks.append(pb.start())
                last_break = pb.start()
        return pbreaks
    def _divide_to_tokensequences(self, text):
        "Divides the text into pseudosentences of fixed size"
        w = self.w
        wrdindex_list = []
        matches = re.finditer(r"\w+", text)
        for match in matches:
            wrdindex_list.append((match.group(), match.start()))
        return [
            # NOTE(review): i / w is true division, so the sequence index is
            # a float (0.0, 1.0, ...); it still compares equal to the int
            # counters used in _create_token_table -- confirm this is intended.
            TokenSequence(i / w, wrdindex_list[i : i + w])
            for i in range(0, len(wrdindex_list), w)
        ]
    def _create_token_table(self, token_sequences, par_breaks):
        "Creates a table of TokenTableFields"
        token_table = {}
        current_par = 0
        current_tok_seq = 0
        pb_iter = par_breaks.__iter__()
        current_par_break = next(pb_iter)
        if current_par_break == 0:
            try:
                current_par_break = next(pb_iter)  # skip break at 0
            except StopIteration as e:
                raise ValueError(
                    "No paragraph breaks were found(text too short perhaps?)"
                ) from e
        for ts in token_sequences:
            for word, index in ts.wrdindex_list:
                # Advance the paragraph counter until this word's offset
                # falls before the next paragraph break.
                try:
                    while index > current_par_break:
                        current_par_break = next(pb_iter)
                        current_par += 1
                except StopIteration:
                    # hit bottom
                    pass
                if word in token_table:
                    token_table[word].total_count += 1
                    if token_table[word].last_par != current_par:
                        token_table[word].last_par = current_par
                        token_table[word].par_count += 1
                    if token_table[word].last_tok_seq != current_tok_seq:
                        token_table[word].last_tok_seq = current_tok_seq
                        # Start a new (sequence, count) pair for this sequence.
                        token_table[word].ts_occurences.append([current_tok_seq, 1])
                    else:
                        token_table[word].ts_occurences[-1][1] += 1
                else:  # new word
                    token_table[word] = TokenTableField(
                        first_pos=index,
                        ts_occurences=[[current_tok_seq, 1]],
                        total_count=1,
                        par_count=1,
                        last_par=current_par,
                        last_tok_seq=current_tok_seq,
                    )
            current_tok_seq += 1
        return token_table
    def _identify_boundaries(self, depth_scores):
        """Identifies boundaries at the peaks of similarity score
        differences"""
        boundaries = [0 for x in depth_scores]
        avg = sum(depth_scores) / len(depth_scores)
        stdev = numpy.std(depth_scores)
        # LC uses a stricter cutoff (one full stdev below the mean) and so
        # yields fewer boundaries than HC (half a stdev below the mean).
        if self.cutoff_policy == LC:
            cutoff = avg - stdev
        else:
            cutoff = avg - stdev / 2.0
        depth_tuples = sorted(zip(depth_scores, range(len(depth_scores))))
        depth_tuples.reverse()
        hp = list(filter(lambda x: x[0] > cutoff, depth_tuples))
        for dt in hp:
            boundaries[dt[1]] = 1
            for dt2 in hp:  # undo if there is a boundary close already
                # A previously-set boundary within 4 gaps suppresses this one.
                if (
                    dt[1] != dt2[1]
                    and abs(dt2[1] - dt[1]) < 4
                    and boundaries[dt2[1]] == 1
                ):
                    boundaries[dt[1]] = 0
        return boundaries
    def _depth_scores(self, scores):
        """Calculates the depth of each gap, i.e. the average difference
        between the left and right peaks and the gap's score"""
        depth_scores = [0 for x in scores]
        # clip boundaries: this holds on the rule of thumb(my thumb)
        # that a section shouldn't be smaller than at least 2
        # pseudosentences for small texts and around 5 for larger ones.
        clip = min(max(len(scores) // 10, 2), 5)
        index = clip
        for gapscore in scores[clip:-clip]:
            # Walk left and right from the gap while the score keeps rising,
            # recording the nearest peak on each side.
            lpeak = gapscore
            for score in scores[index::-1]:
                if score >= lpeak:
                    lpeak = score
                else:
                    break
            rpeak = gapscore
            for score in scores[index:]:
                if score >= rpeak:
                    rpeak = score
                else:
                    break
            depth_scores[index] = lpeak + rpeak - 2 * gapscore
            index += 1
        return depth_scores
    def _normalize_boundaries(self, text, boundaries, paragraph_breaks):
        """Normalize the boundaries identified to the original text's
        paragraph breaks"""
        norm_boundaries = []
        char_count, word_count, gaps_seen = 0, 0, 0
        seen_word = False
        for char in text:
            char_count += 1
            # Track word boundaries by whitespace transitions.
            if char in " \t\n" and seen_word:
                seen_word = False
                word_count += 1
            if char not in " \t\n" and not seen_word:
                seen_word = True
            # A gap occurs every self.w words (one pseudosentence).
            if gaps_seen < len(boundaries) and word_count > (
                max(gaps_seen * self.w, self.w)
            ):
                if boundaries[gaps_seen] == 1:
                    # find closest paragraph break; paragraph_breaks is
                    # sorted, so stop as soon as the distance grows again.
                    best_fit = len(text)
                    for br in paragraph_breaks:
                        if best_fit > abs(br - char_count):
                            best_fit = abs(br - char_count)
                            bestbr = br
                        else:
                            break
                    if bestbr not in norm_boundaries:  # avoid duplicates
                        norm_boundaries.append(bestbr)
                gaps_seen += 1
        return norm_boundaries
# NOTE(review): this corpus-loader statement appears spliced in from
# nltk.corpus and is unrelated to the TextTiling code above -- confirm
# placement.  Presumably LazyCorpusLoader defers reading the Brown corpus
# until first attribute access; fileids match r"c[a-z]\d\d" (e.g. "ca01").
brown: CategorizedTaggedCorpusReader = LazyCorpusLoader(
    "brown",
    CategorizedTaggedCorpusReader,
    r"c[a-z]\d\d",
    cat_file="cats.txt",
    tagset="brown",
    encoding="ascii",
)
def demo(text=None):
    """Plot TextTiling gap, smoothed-gap and depth scores for *text*.

    When *text* is None, the first 10000 characters of the Brown corpus
    are used.  Boundaries are drawn as a stem plot.
    """
    from matplotlib import pylab
    from nltk.corpus import brown

    tokenizer = TextTilingTokenizer(demo_mode=True)
    if text is None:
        text = brown.raw()[:10000]
    gap, smoothed, depth, bounds = tokenizer.tokenize(text)

    pylab.xlabel("Sentence Gap index")
    pylab.ylabel("Gap Scores")
    pylab.plot(range(len(gap)), gap, label="Gap Scores")
    pylab.plot(range(len(smoothed)), smoothed, label="Smoothed Gap scores")
    pylab.plot(range(len(depth)), depth, label="Depth scores")
    pylab.stem(range(len(bounds)), bounds)
    pylab.legend()
    pylab.show()
170,923 | import re
from nltk.tokenize.api import TokenizerI
from nltk.tokenize.util import regexp_span_tokenize
class RegexpTokenizer(TokenizerI):
    r"""
    A tokenizer that splits a string using a regular expression, which
    matches either the tokens or the separators between tokens.

        >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+')

    :type pattern: str
    :param pattern: The pattern used to build this tokenizer.
        (This pattern must not contain capturing parentheses;
        Use non-capturing parentheses, e.g. (?:...), instead)
    :type gaps: bool
    :param gaps: True if this tokenizer's pattern should be used
        to find separators between tokens; False if this
        tokenizer's pattern should be used to find the tokens
        themselves.
    :type discard_empty: bool
    :param discard_empty: True if any empty tokens `''`
        generated by the tokenizer should be discarded.  Empty
        tokens can only be generated if `_gaps == True`.
    :type flags: int
    :param flags: The regexp flags used to compile this
        tokenizer's pattern.  By default, the following flags are
        used: `re.UNICODE | re.MULTILINE | re.DOTALL`.
    """

    def __init__(
        self,
        pattern,
        gaps=False,
        discard_empty=True,
        flags=re.UNICODE | re.MULTILINE | re.DOTALL,
    ):
        # Accept either a pattern string or a pre-compiled regexp object.
        self._pattern = getattr(pattern, "pattern", pattern)
        self._gaps = gaps
        self._discard_empty = discard_empty
        self._flags = flags
        # Compiled lazily, on first tokenize/span_tokenize call.
        self._regexp = None

    def _check_regexp(self):
        # Compile the pattern on demand and cache the result.
        if self._regexp is None:
            self._regexp = re.compile(self._pattern, self._flags)

    def tokenize(self, text):
        self._check_regexp()
        if not self._gaps:
            # The pattern matches the tokens themselves.
            return self._regexp.findall(text)
        # The pattern matches the separators between tokens.
        pieces = self._regexp.split(text)
        if self._discard_empty:
            pieces = [tok for tok in pieces if tok]
        return pieces

    def span_tokenize(self, text):
        self._check_regexp()
        if self._gaps:
            for start, end in regexp_span_tokenize(text, self._regexp):
                # Skip zero-width spans when empty tokens are discarded.
                if start != end or not self._discard_empty:
                    yield start, end
        else:
            for match in re.finditer(self._regexp, text):
                yield match.span()

    def __repr__(self):
        return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format(
            self.__class__.__name__,
            self._pattern,
            self._gaps,
            self._discard_empty,
            self._flags,
        )
The provided code snippet includes necessary dependencies for implementing the `regexp_tokenize` function. Write a Python function `def regexp_tokenize( text, pattern, gaps=False, discard_empty=True, flags=re.UNICODE | re.MULTILINE | re.DOTALL, )` to solve the following problem:
Return a tokenized copy of *text*. See :class:`.RegexpTokenizer` for descriptions of the arguments.
Here is the function:
def regexp_tokenize(
    text,
    pattern,
    gaps=False,
    discard_empty=True,
    flags=re.UNICODE | re.MULTILINE | re.DOTALL,
):
    """
    Return a tokenized copy of *text*. See :class:`.RegexpTokenizer`
    for descriptions of the arguments.
    """
    # Convenience wrapper: build a throwaway tokenizer and use it once.
    return RegexpTokenizer(pattern, gaps, discard_empty, flags).tokenize(text)
170,924 | import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
class ErrorMessage(DownloaderMessage):
    """Data server encountered an error"""

    def __init__(self, package, message):
        self.package = package
        # Normalise exceptions to their string form so consumers always
        # see plain text in ``message``.
        self.message = str(message) if isinstance(message, Exception) else message
def _unzip_iter(filename, root, verbose=True):
    """Extract *filename* into *root*, yielding an ErrorMessage on failure."""
    if verbose:
        base = os.path.split(filename)[1]
        sys.stdout.write("Unzipping %s" % base)
        sys.stdout.flush()

    try:
        zf = zipfile.ZipFile(filename)
    except zipfile.error:
        # Corrupt/truncated archive: report a canned message.
        yield ErrorMessage(filename, "Error with downloaded zip file")
        return
    except Exception as e:
        # Anything else (e.g. missing file): report the exception itself.
        yield ErrorMessage(filename, e)
        return

    zf.extractall(root)
    if verbose:
        print()
The provided code snippet includes necessary dependencies for implementing the `unzip` function. Write a Python function `def unzip(filename, root, verbose=True)` to solve the following problem:
Extract the contents of the zip file ``filename`` into the directory ``root``.
Here is the function:
def unzip(filename, root, verbose=True):
    """
    Extract the contents of the zip file ``filename`` into the
    directory ``root``.
    """
    # _unzip_iter only yields messages for failures; the first error
    # encountered aborts the whole extraction.
    for msg in _unzip_iter(filename, root, verbose):
        if isinstance(msg, ErrorMessage):
            raise Exception(msg)
170,925 | import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
def md5_hexdigest(file):
    """
    Calculate and return the MD5 checksum for a given file.
    ``file`` may either be a filename or an open stream.
    """
    # A plain string is treated as a path; anything else is assumed to be
    # a readable binary stream.
    if not isinstance(file, str):
        return _md5_hexdigest(file)
    with open(file, "rb") as infile:
        return _md5_hexdigest(infile)
def _indent_xml(xml, prefix=""):
"""
Helper for ``build_index()``: Given an XML ``ElementTree``, modify it
(and its descendents) ``text`` and ``tail`` attributes to generate
an indented tree, where each nested element is indented by 2
spaces with respect to its parent.
"""
if len(xml) > 0:
xml.text = (xml.text or "").strip() + "\n" + prefix + " "
for child in xml:
_indent_xml(child, prefix + " ")
for child in xml[:-1]:
child.tail = (child.tail or "").strip() + "\n" + prefix + " "
xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix
def _find_collections(root):
"""
Helper for ``build_index()``: Yield a list of ElementTree.Element
objects, each holding the xml for a single package collection.
"""
for dirname, _subdirs, files in os.walk(root):
for filename in files:
if filename.endswith(".xml"):
xmlfile = os.path.join(dirname, filename)
yield ElementTree.parse(xmlfile).getroot()
def _find_packages(root):
    """
    Helper for ``build_index()``: Yield a list of tuples
    ``(pkg_xml, zf, subdir)``, where:
      - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a
        package
      - ``zf`` is a ``zipfile.ZipFile`` for the package's contents.
      - ``subdir`` is the subdirectory (relative to ``root``) where
        the package was found (e.g. 'corpora' or 'grammars').

    :raises ValueError: if a zip or xml file cannot be read, if a package's
        id does not match its filename, or if a zipfile does not expand to
        a single directory named after the package.
    """
    from nltk.corpus.reader.util import _path_from

    # Walk the tree; every .xml file is a package descriptor that must be
    # paired with a same-named .zip file.
    for dirname, subdirs, files in os.walk(root):
        relpath = "/".join(_path_from(root, dirname))
        for filename in files:
            if filename.endswith(".xml"):
                xmlfilename = os.path.join(dirname, filename)
                zipfilename = xmlfilename[:-4] + ".zip"
                try:
                    zf = zipfile.ZipFile(zipfilename)
                except Exception as e:
                    raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e
                try:
                    pkg_xml = ElementTree.parse(xmlfilename).getroot()
                except Exception as e:
                    raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e
                # Check that the UID matches the filename
                uid = os.path.split(xmlfilename[:-4])[1]
                if pkg_xml.get("id") != uid:
                    raise ValueError(
                        "package identifier mismatch (%s "
                        "vs %s)" % (pkg_xml.get("id"), uid)
                    )
                # Check that the zipfile expands to a subdir whose
                # name matches the uid.
                if sum(
                    (name != uid and not name.startswith(uid + "/"))
                    for name in zf.namelist()
                ):
                    raise ValueError(
                        "Zipfile %s.zip does not expand to a "
                        "single subdirectory %s/" % (uid, uid)
                    )
                yield pkg_xml, zf, relpath
            elif filename.endswith(".zip"):
                # Warn user in case a .xml does not exist for a .zip
                resourcename = os.path.splitext(filename)[0]
                xmlfilename = os.path.join(dirname, resourcename + ".xml")
                if not os.path.exists(xmlfilename):
                    # BUG FIX: the message previously said "(unknown) exists"
                    # instead of naming the zip file that was found.
                    warnings.warn(
                        f"{filename} exists, but {resourcename + '.xml'} cannot be found! "
                        f"This could mean that {resourcename} can not be downloaded.",
                        stacklevel=2,
                    )
        # Don't recurse into svn subdirectories:
        try:
            subdirs.remove(".svn")
        except ValueError:
            pass
The provided code snippet includes necessary dependencies for implementing the `build_index` function. Write a Python function `def build_index(root, base_url)` to solve the following problem:
Create a new data.xml index file, by combining the xml description files for various packages and collections. ``root`` should be the path to a directory containing the package xml and zip files; and the collection xml files. The ``root`` directory is expected to have the following subdirectories:: root/ packages/ .................. subdirectory for packages corpora/ ................. zip & xml files for corpora grammars/ ................ zip & xml files for grammars taggers/ ................. zip & xml files for taggers tokenizers/ .............. zip & xml files for tokenizers etc. collections/ ............... xml files for collections For each package, there should be two files: ``package.zip`` (where *package* is the package name) which contains the package itself as a compressed zip file; and ``package.xml``, which is an xml description of the package. The zipfile ``package.zip`` should expand to a single subdirectory named ``package/``. The base filename ``package`` must match the identifier given in the package's xml file. For each collection, there should be a single file ``collection.zip`` describing the collection, where *collection* is the name of the collection. All identifiers (for both packages and collections) must be unique.
Here is the function:
def build_index(root, base_url):
    """
    Create a new data.xml index file, by combining the xml description
    files for various packages and collections. ``root`` should be the
    path to a directory containing the package xml and zip files; and
    the collection xml files. The ``root`` directory is expected to
    have the following subdirectories::

        root/
          packages/ .................. subdirectory for packages
            corpora/ ................. zip & xml files for corpora
            grammars/ ................ zip & xml files for grammars
            taggers/ ................. zip & xml files for taggers
            tokenizers/ .............. zip & xml files for tokenizers
            etc.
          collections/ ............... xml files for collections

    For each package, there should be two files: ``package.zip``
    (where *package* is the package name)
    which contains the package itself as a compressed zip file; and
    ``package.xml``, which is an xml description of the package. The
    zipfile ``package.zip`` should expand to a single subdirectory
    named ``package/``. The base filename ``package`` must match
    the identifier given in the package's xml file.

    For each collection, there should be a single file ``collection.zip``
    describing the collection, where *collection* is the name of the collection.

    All identifiers (for both packages and collections) must be unique.

    :return: the root ``ElementTree.Element`` of the assembled index,
        already pretty-indented via ``_indent_xml``.
    :raises ValueError: if two packages/collections share an id.
    """
    # Find all packages.
    packages = []
    for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")):
        zipstat = os.stat(zf.filename)
        url = f"{base_url}/{subdir}/{os.path.split(zf.filename)[1]}"
        unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist())
        # Fill in several fields of the package xml with calculated values.
        pkg_xml.set("unzipped_size", "%s" % unzipped_size)
        pkg_xml.set("size", "%s" % zipstat.st_size)
        pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename))
        pkg_xml.set("subdir", subdir)
        # pkg_xml.set('svn_revision', _svn_revision(zf.filename))
        # An explicit url attribute in the package xml wins over the
        # computed one.
        if not pkg_xml.get("url"):
            pkg_xml.set("url", url)
        # Record the package.
        packages.append(pkg_xml)
    # Find all collections
    collections = list(_find_collections(os.path.join(root, "collections")))
    # Check that all UIDs are unique
    uids = set()
    for item in packages + collections:
        if item.get("id") in uids:
            raise ValueError("Duplicate UID: %s" % item.get("id"))
        uids.add(item.get("id"))
    # Put it all together: <nltk_data><packages>...</packages>
    # <collections>...</collections></nltk_data>, each section sorted by id.
    top_elt = ElementTree.Element("nltk_data")
    top_elt.append(ElementTree.Element("packages"))
    top_elt[0].extend(sorted(packages, key=lambda package: package.get("id")))
    top_elt.append(ElementTree.Element("collections"))
    top_elt[1].extend(sorted(collections, key=lambda collection: collection.get("id")))
    _indent_xml(top_elt)
    return top_elt
170,926 | import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
The provided code snippet includes necessary dependencies for implementing the `_check_package` function. Write a Python function `def _check_package(pkg_xml, zipfilename, zf)` to solve the following problem:
Helper for ``build_index()``: Perform some checks to make sure that the given package is consistent.
Here is the function:
def _check_package(pkg_xml, zipfilename, zf):
"""
Helper for ``build_index()``: Perform some checks to make sure that
the given package is consistent.
"""
# The filename must patch the id given in the XML file.
uid = os.path.splitext(os.path.split(zipfilename)[1])[0]
if pkg_xml.get("id") != uid:
raise ValueError(
"package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid)
)
# Zip file must expand to a subdir whose name matches uid.
if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()):
raise ValueError(
"Zipfile %s.zip does not expand to a single "
"subdirectory %s/" % (uid, uid)
) | Helper for ``build_index()``: Perform some checks to make sure that the given package is consistent. |
170,927 | import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
The provided code snippet includes necessary dependencies for implementing the `_svn_revision` function. Write a Python function `def _svn_revision(filename)` to solve the following problem:
Helper for ``build_index()``: Calculate the subversion revision number for a given file (by using ``subprocess`` to run ``svn``).
Here is the function:
def _svn_revision(filename):
    """
    Helper for ``build_index()``: Calculate the subversion revision
    number for a given file (by using ``subprocess`` to run ``svn``).
    """
    proc = subprocess.run(
        ["svn", "status", "-v", filename],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Any failure mode -- non-zero exit status, output on stderr, or no
    # output at all -- is reported as a ValueError naming the file.
    if proc.returncode != 0 or proc.stderr or not proc.stdout:
        raise ValueError(
            "Error determining svn_revision for %s: %s"
            % (os.path.split(filename)[1], textwrap.fill(proc.stderr))
        )
    # The revision is the third whitespace-separated token of the output.
    return proc.stdout.split()[2]
170,928 | import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
class DownloaderShell:
    """Interactive text-mode front-end for a ``Downloader`` data server."""
    def __init__(self, dataserver):
        # The Downloader instance all commands are dispatched to.
        self._ds = dataserver
    def _simple_interactive_menu(self, *options):
        # Render the options between two 75-column dashed rules, padded so
        # the row spans roughly the full width.
        print("-" * 75)
        spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " "
        print(" " + spc.join(options))
        print("-" * 75)
    def run(self):
        # Main read-eval loop: dispatch on the first word of the user's
        # input until 'q'/'x' quits.  Server errors are caught and printed
        # so the shell keeps running.
        print("NLTK Downloader")
        while True:
            self._simple_interactive_menu(
                "d) Download",
                "l) List",
                " u) Update",
                "c) Config",
                "h) Help",
                "q) Quit",
            )
            user_input = input("Downloader> ").strip()
            if not user_input:
                print()
                continue
            command = user_input.lower().split()[0]
            args = user_input.split()[1:]
            try:
                if command == "l":
                    print()
                    self._ds.list(self._ds.download_dir, header=False, more_prompt=True)
                elif command == "h":
                    self._simple_interactive_help()
                elif command == "c":
                    self._simple_interactive_config()
                elif command in ("q", "x"):
                    return
                elif command == "d":
                    self._simple_interactive_download(args)
                elif command == "u":
                    self._simple_interactive_update()
                else:
                    print("Command %r unrecognized" % user_input)
            except HTTPError as e:
                print("Error reading from server: %s" % e)
            except URLError as e:
                print("Error connecting to server: %s" % e.reason)
            # try checking if user_input is a package name, &
            # downloading it?
            print()
    def _simple_interactive_download(self, args):
        # With arguments: download each named package directly.  Without:
        # prompt for identifiers (l=list available, x/q/empty=cancel).
        if args:
            for arg in args:
                try:
                    self._ds.download(arg, prefix=" ")
                except (OSError, ValueError) as e:
                    print(e)
        else:
            while True:
                print()
                print("Download which package (l=list; x=cancel)?")
                user_input = input(" Identifier> ")
                if user_input.lower() == "l":
                    self._ds.list(
                        self._ds.download_dir,
                        header=False,
                        more_prompt=True,
                        skip_installed=True,
                    )
                    continue
                elif user_input.lower() in ("x", "q", ""):
                    return
                elif user_input:
                    for id in user_input.split():
                        try:
                            self._ds.download(id, prefix=" ")
                        except (OSError, ValueError) as e:
                            print(e)
                    break
    def _simple_interactive_update(self):
        # List stale (out-of-date) packages and offer to re-download them.
        while True:
            stale_packages = []
            # NOTE(review): these two flags are set but never read.
            stale = partial = False
            for info in sorted(getattr(self._ds, "packages")(), key=str):
                if self._ds.status(info) == self._ds.STALE:
                    stale_packages.append((info.id, info.name))
            print()
            if stale_packages:
                print("Will update following packages (o=ok; x=cancel)")
                for pid, pname in stale_packages:
                    # Wrap the name so continuation lines align past col 27.
                    name = textwrap.fill(
                        "-" * 27 + (pname), 75, subsequent_indent=27 * " "
                    )[27:]
                    print(" [ ] {} {}".format(pid.ljust(20, "."), name))
                print()
                user_input = input(" Identifier> ")
                if user_input.lower() == "o":
                    for pid, pname in stale_packages:
                        try:
                            self._ds.download(pid, prefix=" ")
                        except (OSError, ValueError) as e:
                            print(e)
                    break
                elif user_input.lower() in ("x", "q", ""):
                    return
            else:
                print("Nothing to update.")
                return
    def _simple_interactive_help(self):
        # Static help text for the top-level menu commands.
        print()
        print("Commands:")
        print(
            " d) Download a package or collection u) Update out of date packages"
        )
        print(" l) List packages & collections h) Help")
        print(" c) View & Modify Configuration q) Quit")
    def _show_config(self):
        # Dump current server URL, counts, and the local download directory.
        print()
        print("Data Server:")
        print(" - URL: <%s>" % self._ds.url)
        print(" - %d Package Collections Available" % len(self._ds.collections()))
        print(" - %d Individual Packages Available" % len(self._ds.packages()))
        print()
        print("Local Machine:")
        print(" - Data directory: %s" % self._ds.download_dir)
    def _simple_interactive_config(self):
        # Sub-menu for changing the server URL and download directory.
        self._show_config()
        while True:
            print()
            self._simple_interactive_menu(
                "s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu"
            )
            user_input = input("Config> ").strip().lower()
            if user_input == "s":
                self._show_config()
            elif user_input == "d":
                new_dl_dir = input(" New Directory> ").strip()
                if new_dl_dir in ("", "x", "q", "X", "Q"):
                    print(" Cancelled!")
                elif os.path.isdir(new_dl_dir):
                    self._ds.download_dir = new_dl_dir
                else:
                    print("Directory %r not found! Create it first." % new_dl_dir)
            elif user_input == "u":
                new_url = input(" New URL> ").strip()
                if new_url in ("", "x", "q", "X", "Q"):
                    print(" Cancelled!")
                else:
                    # Default to http:// when no scheme was given.
                    if not new_url.startswith(("http://", "https://")):
                        new_url = "http://" + new_url
                    try:
                        self._ds.url = new_url
                    except Exception as e:
                        print(f"Error reading <{new_url!r}>:\n {e}")
            elif user_input == "m":
                break
# Shared module-level Downloader used by the convenience entry point below.
_downloader = Downloader()
def download_shell():
    # Launch the interactive text-mode downloader against the shared instance.
    DownloaderShell(_downloader).run()
170,929 | import functools
import itertools
import os
import shutil
import subprocess
import sys
import textwrap
import threading
import time
import warnings
import zipfile
from hashlib import md5
from xml.etree import ElementTree
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
import nltk
# NOTE(review): the method bodies of DownloaderGUI were elided when this
# snippet was extracted -- only bare `def` signatures remain, so this block
# is not valid executable Python as-is.  Kept verbatim; consult the full
# module for the real implementations.  From the signatures, this is a
# Tkinter-style GUI front-end for a Downloader, with an optional worker
# thread (`use_threads`) and a message queue -- confirm against the source.
class DownloaderGUI:
    def __init__(self, dataserver, use_threads=True):
    def _log(self, msg):
    def _init_widgets(self):
    def _init_menu(self):
    def _select_columns(self):
    def _refresh(self):
    def _info_edit(self, info_key):
    def _info_save(self, e=None):
    def _table_reprfunc(self, row, col, val):
    def _set_url(self, url):
    def _set_download_dir(self, download_dir):
    def _show_info(self):
    def _prev_tab(self, *e):
    def _next_tab(self, *e):
    def _select_tab(self, event):
    def _fill_table(self):
    def _update_table_status(self):
    def _download(self, *e):
    def _download_cb(self, download_iter, ids):
        def show(s):
    def _select(self, id):
    def _color_table(self):
    def _clear_mark(self, id):
    def _mark_all(self, *e):
    def _table_mark(self, *e):
    def _show_log(self):
    def _package_to_columns(self, pkg):
    def destroy(self, *e):
    def _destroy(self, *e):
    def mainloop(self, *args, **kwargs):
    def help(self, *e):
    def about(self, *e):
    def _init_progressbar(self):
    def _show_progress(self, percent):
    def _progress_alive(self):
    def _download_threaded(self, *e):
    def _abort_download(self):
    def __init__(self, data_server, items, lock, message_queue, abort):
    def run(self):
    def _monitor_message_queue(self):
        def show(s):
# Shared module-level Downloader used by the convenience entry point below.
_downloader = Downloader()
def download_gui():
    # Launch the GUI downloader against the shared instance.
    DownloaderGUI(_downloader).mainloop()
170,930 | import logging
import numpy as np
from matplotlib import _api, artist as martist
import matplotlib.transforms as mtransforms
import matplotlib._layoutgrid as mlayoutgrid
def make_layoutgrids(fig, layoutgrids, rect=(0, 0, 1, 1)):
    """
    Make the layoutgrid tree.

    (Sub)Figures get a layoutgrid so we can have figure margins.

    Gridspecs that are attached to axes get a layoutgrid so axes
    can have margins.

    Recurses through ``fig.subfigs``, so one call on the top figure builds
    (and returns) the complete ``layoutgrids`` dict for the whole tree.
    """
    if layoutgrids is None:
        layoutgrids = dict()
        # Sentinel consulted elsewhere to tell whether any gridspec
        # layoutgrids have been added yet.
        layoutgrids['hasgrids'] = False
    if not hasattr(fig, '_parent'):
        # top figure; pass rect as parent to allow user-specified
        # margins
        layoutgrids[fig] = mlayoutgrid.LayoutGrid(parent=rect, name='figlb')
    else:
        # subfigure
        gs = fig._subplotspec.get_gridspec()
        # it is possible the gridspec containing this subfigure hasn't
        # been added to the tree yet:
        layoutgrids = make_layoutgrids_gs(layoutgrids, gs)
        # add the layoutgrid for the subfigure:
        parentlb = layoutgrids[gs]
        layoutgrids[fig] = mlayoutgrid.LayoutGrid(
            parent=parentlb,
            name='panellb',
            parent_inner=True,
            nrows=1, ncols=1,
            parent_pos=(fig._subplotspec.rowspan,
                        fig._subplotspec.colspan))
    # recursively do all subfigures in this figure...
    for sfig in fig.subfigs:
        layoutgrids = make_layoutgrids(sfig, layoutgrids)
    # for each axes at the local level add its gridspec:
    for ax in fig._localaxes:
        gs = ax.get_gridspec()
        if gs is not None:
            layoutgrids = make_layoutgrids_gs(layoutgrids, gs)
    return layoutgrids
def check_no_collapsed_axes(layoutgrids, fig):
    """
    Check that no axes have collapsed to zero size.
    """
    # Recurse into subfigures first; a collapsed child invalidates all.
    if not all(check_no_collapsed_axes(layoutgrids, sub)
               for sub in fig.subfigs):
        return False
    for ax in fig.axes:
        gs = ax.get_gridspec()
        lg = layoutgrids.get(gs)  # None when gs is None or not in the tree
        if lg is None:
            continue
        for row in range(gs.nrows):
            for col in range(gs.ncols):
                cell = lg.get_inner_bbox(row, col)
                if cell.width <= 0 or cell.height <= 0:
                    return False
    return True
def compress_fixed_aspect(layoutgrids, fig):
    """
    Shift margins so that fixed-aspect axes are packed tightly.

    After ``apply_aspect`` shrinks fixed-aspect axes inside their gridspec
    cells, measure how much width/height each axes gave up and convert half
    of the total into the figure's outer margins, re-centering the grid and
    pushing the recovered whitespace to the outside.

    Parameters
    ----------
    layoutgrids : dict
        Mapping of figure/gridspec to `.LayoutGrid` (as built by
        `make_layoutgrids`); the figure entry's margin minima are edited
        in place.
    fig : Figure

    Returns
    -------
    dict
        The (mutated) *layoutgrids*.

    Raises
    ------
    ValueError
        If participating axes do not all share one gridspec, or no axes
        belong to a gridspec at all.
    """
    gs = None
    for ax in fig.axes:
        if ax.get_subplotspec() is None:
            continue
        ax.apply_aspect()
        sub = ax.get_subplotspec()
        _gs = sub.get_gridspec()
        if gs is None:
            gs = _gs
            extraw = np.zeros(gs.ncols)
            extrah = np.zeros(gs.nrows)
        elif _gs != gs:
            # All participating axes must live on one gridspec, otherwise
            # the per-column/row bookkeeping below is ill-defined.
            # (Fixed: the two adjacent string literals previously
            # concatenated to "...axes are notall from...".)
            raise ValueError('Cannot do compressed layout if axes are not '
                             'all from the same gridspec')
        orig = ax.get_position(original=True)
        actual = ax.get_position(original=False)
        # Width/height this axes lost to its aspect constraint; track the
        # per-column/per-row maximum.
        dw = orig.width - actual.width
        if dw > 0:
            extraw[sub.colspan] = np.maximum(extraw[sub.colspan], dw)
        dh = orig.height - actual.height
        if dh > 0:
            extrah[sub.rowspan] = np.maximum(extrah[sub.rowspan], dh)

    if gs is None:
        raise ValueError('Cannot do compressed layout if no axes '
                         'are part of a gridspec.')
    # Split the recovered space evenly between the two opposing margins.
    w = np.sum(extraw) / 2
    layoutgrids[fig].edit_margin_min('left', w)
    layoutgrids[fig].edit_margin_min('right', w)
    h = np.sum(extrah) / 2
    layoutgrids[fig].edit_margin_min('top', h)
    layoutgrids[fig].edit_margin_min('bottom', h)
    return layoutgrids
def make_layout_margins(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0,
                        hspace=0, wspace=0):
    """
    For each axes, make a margin between the *pos* layoutbox and the
    *axes* layoutbox be a minimum size that can accommodate the
    decorations on the axis.

    Then make room for colorbars.

    Parameters
    ----------
    layoutgrids : dict
        Mapping of figures/gridspecs to their `.LayoutGrid`; margin
        minima are edited in place.
    fig : Figure or SubFigure
        Figure (or, on recursive calls, subfigure) to process.
    renderer : Renderer
        Renderer used to measure decoration bounding boxes.
    w_pad, h_pad : float
        Padding around the axes elements, in figure-normalized units.
    hspace, wspace : float
        Fraction of the figure dedicated to space between the axes.
    """
    for sfig in fig.subfigs:  # recursively make child panel margins
        ss = sfig._subplotspec
        make_layout_margins(layoutgrids, sfig, renderer,
                            w_pad=w_pad, h_pad=h_pad,
                            hspace=hspace, wspace=wspace)
        # NOTE(review): only h/wspace are forwarded here -- the pads are
        # deliberately zeroed for the subfigure's outer margins.
        margins = get_margin_from_padding(sfig, w_pad=0, h_pad=0,
                                          hspace=hspace, wspace=wspace)
        layoutgrids[sfig].parent.edit_outer_margin_mins(margins, ss)
    for ax in fig._localaxes:
        if not ax.get_subplotspec() or not ax.get_in_layout():
            continue
        ss = ax.get_subplotspec()
        gs = ss.get_gridspec()
        if gs not in layoutgrids:
            # NOTE(review): bails out of the whole loop (``return``, not
            # ``continue``) when a gridspec has no layoutgrid -- confirm
            # that skipping the remaining axes is intended.
            return
        margin = get_margin_from_padding(ax, w_pad=w_pad, h_pad=h_pad,
                                         hspace=hspace, wspace=wspace)
        pos, bbox = get_pos_and_bbox(ax, renderer)
        # the margin is the distance between the bounding box of the axes
        # and its position (plus the padding from above)
        margin['left'] += pos.x0 - bbox.x0
        margin['right'] += bbox.x1 - pos.x1
        # remember that rows are ordered from top:
        margin['bottom'] += pos.y0 - bbox.y0
        margin['top'] += bbox.y1 - pos.y1
        # make margin for colorbars. These margins go in the
        # padding margin, versus the margin for axes decorators.
        for cbax in ax._colorbars:
            # note pad is a fraction of the parent width...
            pad = colorbar_get_pad(layoutgrids, cbax)
            # colorbars can be child of more than one subplot spec:
            cbp_rspan, cbp_cspan = get_cb_parent_spans(cbax)
            loc = cbax._colorbar_info['location']
            cbpos, cbbbox = get_pos_and_bbox(cbax, renderer)
            if loc == 'right':
                if cbp_cspan.stop == ss.colspan.stop:
                    # only increase if the colorbar is on the right edge
                    margin['rightcb'] += cbbbox.width + pad
            elif loc == 'left':
                if cbp_cspan.start == ss.colspan.start:
                    # only increase if the colorbar is on the left edge
                    margin['leftcb'] += cbbbox.width + pad
            elif loc == 'top':
                if cbp_rspan.start == ss.rowspan.start:
                    margin['topcb'] += cbbbox.height + pad
            else:
                # 'bottom' (the only remaining location)
                if cbp_rspan.stop == ss.rowspan.stop:
                    margin['bottomcb'] += cbbbox.height + pad
            # If the colorbars are wider than the parent box in the
            # cross direction
            if loc in ['top', 'bottom']:
                if (cbp_cspan.start == ss.colspan.start and
                        cbbbox.x0 < bbox.x0):
                    margin['left'] += bbox.x0 - cbbbox.x0
                if (cbp_cspan.stop == ss.colspan.stop and
                        cbbbox.x1 > bbox.x1):
                    margin['right'] += cbbbox.x1 - bbox.x1
            # or taller:
            if loc in ['left', 'right']:
                if (cbp_rspan.stop == ss.rowspan.stop and
                        cbbbox.y0 < bbox.y0):
                    margin['bottom'] += bbox.y0 - cbbbox.y0
                if (cbp_rspan.start == ss.rowspan.start and
                        cbbbox.y1 > bbox.y1):
                    margin['top'] += cbbbox.y1 - bbox.y1
        # pass the new margins down to the layout grid for the solution...
        layoutgrids[gs].edit_outer_margin_mins(margin, ss)
    # make margins for figure-level legends:
    for leg in fig.legends:
        inv_trans_fig = None
        if leg._outside_loc and leg._bbox_to_anchor is None:
            if inv_trans_fig is None:
                inv_trans_fig = fig.transFigure.inverted().transform_bbox
            bbox = inv_trans_fig(leg.get_tightbbox(renderer))
            w = bbox.width + 2 * w_pad
            h = bbox.height + 2 * h_pad
            legendloc = leg._outside_loc
            if legendloc == 'lower':
                layoutgrids[fig].edit_margin_min('bottom', h)
            elif legendloc == 'upper':
                layoutgrids[fig].edit_margin_min('top', h)
            if legendloc == 'right':
                layoutgrids[fig].edit_margin_min('right', w)
            elif legendloc == 'left':
                layoutgrids[fig].edit_margin_min('left', w)
def make_margin_suptitles(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0):
    """
    Grow the figure's outer margins to fit the suptitle/supxlabel/supylabel.

    Auto-positioned super-labels are first pinned just inside the pad;
    then their measured bounding box (plus padding) is turned into a
    minimum top/bottom/left margin on the figure's layoutgrid.
    Recurses into subfigures.
    """
    # Figure out how large the suptitle is and make the
    # top level figure margin larger.
    inv_trans_fig = fig.transFigure.inverted().transform_bbox
    # get the h_pad and w_pad as distances in the local subfigure coordinates:
    padbox = mtransforms.Bbox([[0, 0], [w_pad, h_pad]])
    padbox = (fig.transFigure -
              fig.transSubfigure).transform_bbox(padbox)
    h_pad_local = padbox.height
    w_pad_local = padbox.width
    for sfig in fig.subfigs:
        make_margin_suptitles(layoutgrids, sfig, renderer,
                              w_pad=w_pad, h_pad=h_pad)
    if fig._suptitle is not None and fig._suptitle.get_in_layout():
        p = fig._suptitle.get_position()
        if getattr(fig._suptitle, '_autopos', False):
            # Auto-positioned: keep x, pin y just below the top pad.
            fig._suptitle.set_position((p[0], 1 - h_pad_local))
        bbox = inv_trans_fig(fig._suptitle.get_tightbbox(renderer))
        layoutgrids[fig].edit_margin_min('top', bbox.height + 2 * h_pad)
    if fig._supxlabel is not None and fig._supxlabel.get_in_layout():
        p = fig._supxlabel.get_position()
        if getattr(fig._supxlabel, '_autopos', False):
            fig._supxlabel.set_position((p[0], h_pad_local))
        bbox = inv_trans_fig(fig._supxlabel.get_tightbbox(renderer))
        layoutgrids[fig].edit_margin_min('bottom',
                                         bbox.height + 2 * h_pad)
    if fig._supylabel is not None and fig._supylabel.get_in_layout():
        p = fig._supylabel.get_position()
        if getattr(fig._supylabel, '_autopos', False):
            fig._supylabel.set_position((w_pad_local, p[1]))
        bbox = inv_trans_fig(fig._supylabel.get_tightbbox(renderer))
        layoutgrids[fig].edit_margin_min('left', bbox.width + 2 * w_pad)
def match_submerged_margins(layoutgrids, fig):
    """
    Make the margins that are submerged inside an Axes the same size.

    This allows axes that span two columns (or rows) that are offset
    from one another to have the same size.

    This gives the proper layout for something like::

        fig = plt.figure(constrained_layout=True)
        axs = fig.subplot_mosaic("AAAB\nCCDD")

    Without this routine, the axes D will be wider than C, because the
    margin width between the two columns in C has no width by default,
    whereas the margins between the two columns of D are set by the
    width of the margin between A and B. However, obviously the user would
    like C and D to be the same size, so we need to add constraints to these
    "submerged" margins.

    This routine makes all the interior margins the same, and the spacing
    between the three columns in A and the two column in C are all set to the
    margins between the two columns of D.

    See test_constrained_layout::test_constrained_layout12 for an example.
    """
    for sfig in fig.subfigs:
        match_submerged_margins(layoutgrids, sfig)

    # Restrict to axes that participate in layout *and* whose gridspec is
    # in the layoutgrid tree.  Filtering up front fixes two defects of the
    # previous version: it removed entries from ``axs`` while iterating
    # over it (which silently skipped the element following each removal),
    # and the inner ``ax2`` loops could raise KeyError for not-yet-removed
    # axes whose gridspec had no layoutgrid.
    axs = []
    for a in fig.get_axes():
        ss = a.get_subplotspec()
        if (ss is not None and a.get_in_layout()
                and ss.get_gridspec() in layoutgrids):
            axs.append(a)

    for ax1 in axs:
        ss1 = ax1.get_subplotspec()
        lg1 = layoutgrids[ss1.get_gridspec()]

        # interior columns: make every submerged left/right margin at
        # least as large as the largest such margin on any multi-column
        # axes in the figure.
        if len(ss1.colspan) > 1:
            maxsubl = np.max(
                lg1.margin_vals['left'][ss1.colspan[1:]] +
                lg1.margin_vals['leftcb'][ss1.colspan[1:]]
            )
            maxsubr = np.max(
                lg1.margin_vals['right'][ss1.colspan[:-1]] +
                lg1.margin_vals['rightcb'][ss1.colspan[:-1]]
            )
            for ax2 in axs:
                ss2 = ax2.get_subplotspec()
                lg2 = layoutgrids[ss2.get_gridspec()]
                if lg2 is not None and len(ss2.colspan) > 1:
                    maxsubl2 = np.max(
                        lg2.margin_vals['left'][ss2.colspan[1:]] +
                        lg2.margin_vals['leftcb'][ss2.colspan[1:]])
                    if maxsubl2 > maxsubl:
                        maxsubl = maxsubl2
                    maxsubr2 = np.max(
                        lg2.margin_vals['right'][ss2.colspan[:-1]] +
                        lg2.margin_vals['rightcb'][ss2.colspan[:-1]])
                    if maxsubr2 > maxsubr:
                        maxsubr = maxsubr2
            for i in ss1.colspan[1:]:
                lg1.edit_margin_min('left', maxsubl, cell=i)
            for i in ss1.colspan[:-1]:
                lg1.edit_margin_min('right', maxsubr, cell=i)

        # interior rows: same, for submerged top/bottom margins.
        if len(ss1.rowspan) > 1:
            maxsubt = np.max(
                lg1.margin_vals['top'][ss1.rowspan[1:]] +
                lg1.margin_vals['topcb'][ss1.rowspan[1:]]
            )
            maxsubb = np.max(
                lg1.margin_vals['bottom'][ss1.rowspan[:-1]] +
                lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]]
            )
            for ax2 in axs:
                ss2 = ax2.get_subplotspec()
                lg2 = layoutgrids[ss2.get_gridspec()]
                if lg2 is not None:
                    if len(ss2.rowspan) > 1:
                        maxsubt = np.max([np.max(
                            lg2.margin_vals['top'][ss2.rowspan[1:]] +
                            lg2.margin_vals['topcb'][ss2.rowspan[1:]]
                        ), maxsubt])
                        maxsubb = np.max([np.max(
                            lg2.margin_vals['bottom'][ss2.rowspan[:-1]] +
                            lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]]
                        ), maxsubb])
            for i in ss1.rowspan[1:]:
                lg1.edit_margin_min('top', maxsubt, cell=i)
            for i in ss1.rowspan[:-1]:
                lg1.edit_margin_min('bottom', maxsubb, cell=i)
def reposition_axes(layoutgrids, fig, renderer, *,
                    w_pad=0, h_pad=0, hspace=0, wspace=0):
    """
    Reposition all the axes based on the new inner bounding box.

    Recurses into subfigures (re-anchoring each subfigure to its solved
    outer bbox), then moves every in-layout axes to the inner bbox its
    layoutgrid solved for, and finally repositions attached colorbars.
    """
    trans_fig_to_subfig = fig.transFigure - fig.transSubfigure
    for sfig in fig.subfigs:
        bbox = layoutgrids[sfig].get_outer_bbox()
        sfig._redo_transform_rel_fig(
            bbox=bbox.transformed(trans_fig_to_subfig))
        reposition_axes(layoutgrids, sfig, renderer,
                        w_pad=w_pad, h_pad=h_pad,
                        wspace=wspace, hspace=hspace)
    for ax in fig._localaxes:
        if ax.get_subplotspec() is None or not ax.get_in_layout():
            continue
        # grid bbox is in Figure coordinates, but we specify in panel
        # coordinates...
        ss = ax.get_subplotspec()
        gs = ss.get_gridspec()
        if gs not in layoutgrids:
            # NOTE(review): aborts the remaining axes (``return``, not
            # ``continue``) -- confirm intended.
            return
        bbox = layoutgrids[gs].get_inner_bbox(rows=ss.rowspan,
                                              cols=ss.colspan)
        # transform from figure to panel for set_position:
        newbbox = trans_fig_to_subfig.transform_bbox(bbox)
        ax._set_position(newbbox)
        # move the colorbars:
        # we need to keep track of oldw and oldh if there is more than
        # one colorbar:
        offset = {'left': 0, 'right': 0, 'bottom': 0, 'top': 0}
        for nn, cbax in enumerate(ax._colorbars[::-1]):
            if ax == cbax._colorbar_info['parents'][0]:
                reposition_colorbar(layoutgrids, cbax, renderer,
                                    offset=offset)
def reset_margins(layoutgrids, fig):
    """
    Zero out the margin minima stored in the layoutboxes of *fig*.

    Margins are accumulated as minima, so they must be reset to zero
    before each layout pass -- otherwise a shrinking figure could never
    reclaim the space.
    """
    for child in fig.subfigs:
        reset_margins(layoutgrids, child)
    for ax in fig.axes:
        if not ax.get_in_layout():
            continue
        lg = layoutgrids.get(ax.get_gridspec())  # None if gs is None/absent
        if lg is not None:
            lg.reset_margins()
    layoutgrids[fig].reset_margins()
The provided code snippet includes necessary dependencies for implementing the `do_constrained_layout` function. Write a Python function `def do_constrained_layout(fig, h_pad, w_pad, hspace=None, wspace=None, rect=(0, 0, 1, 1), compress=False)` to solve the following problem:
Do the constrained_layout. Called at draw time in ``figure.constrained_layout()`` Parameters ---------- fig : Figure ``Figure`` instance to do the layout in. renderer : Renderer Renderer to use. h_pad, w_pad : float Padding around the axes elements in figure-normalized units. hspace, wspace : float Fraction of the figure to dedicate to space between the axes. These are evenly spread between the gaps between the axes. A value of 0.2 for a three-column layout would have a space of 0.1 of the figure width between each column. If h/wspace < h/w_pad, then the pads are used instead. rect : tuple of 4 floats Rectangle in figure coordinates to perform constrained layout in [left, bottom, width, height], each from 0-1. compress : bool Whether to shift Axes so that white space in between them is removed. This is useful for simple grids of fixed-aspect Axes (e.g. a grid of images). Returns ------- layoutgrid : private debugging structure
Here is the function:
def do_constrained_layout(fig, h_pad, w_pad,
                          hspace=None, wspace=None, rect=(0, 0, 1, 1),
                          compress=False):
    """
    Do the constrained_layout. Called at draw time in
    ``figure.constrained_layout()``

    Parameters
    ----------
    fig : Figure
        ``Figure`` instance to do the layout in.  (The renderer is
        obtained internally via ``fig._get_renderer()``; it is not a
        parameter of this function.)
    h_pad, w_pad : float
        Padding around the axes elements in figure-normalized units.
    hspace, wspace : float
        Fraction of the figure to dedicate to space between the
        axes. These are evenly spread between the gaps between the axes.
        A value of 0.2 for a three-column layout would have a space
        of 0.1 of the figure width between each column.
        If h/wspace < h/w_pad, then the pads are used instead.
    rect : tuple of 4 floats
        Rectangle in figure coordinates to perform constrained layout in
        [left, bottom, width, height], each from 0-1.
    compress : bool
        Whether to shift Axes so that white space in between them is
        removed. This is useful for simple grids of fixed-aspect Axes (e.g.
        a grid of images).

    Returns
    -------
    layoutgrid : private debugging structure
    """
    renderer = fig._get_renderer()
    # make layoutgrid tree...
    layoutgrids = make_layoutgrids(fig, None, rect=rect)
    if not layoutgrids['hasgrids']:
        _api.warn_external('There are no gridspecs with layoutgrids. '
                           'Possibly did not call parent GridSpec with the'
                           ' "figure" keyword')
        return
    for _ in range(2):
        # do the algorithm twice. This has to be done because decorations
        # change size after the first re-position (i.e. x/yticklabels get
        # larger/smaller). This second reposition tends to be much milder,
        # so doing twice makes things work OK.
        # make margins for all the axes and subfigures in the
        # figure. Add margins for colorbars...
        make_layout_margins(layoutgrids, fig, renderer, h_pad=h_pad,
                            w_pad=w_pad, hspace=hspace, wspace=wspace)
        make_margin_suptitles(layoutgrids, fig, renderer, h_pad=h_pad,
                              w_pad=w_pad)
        # if a layout is such that a columns (or rows) margin has no
        # constraints, we need to make all such instances in the grid
        # match in margin size.
        match_submerged_margins(layoutgrids, fig)
        # update all the variables in the layout.
        layoutgrids[fig].update_variables()
        warn_collapsed = ('constrained_layout not applied because '
                          'axes sizes collapsed to zero. Try making '
                          'figure larger or axes decorations smaller.')
        if check_no_collapsed_axes(layoutgrids, fig):
            reposition_axes(layoutgrids, fig, renderer, h_pad=h_pad,
                            w_pad=w_pad, hspace=hspace, wspace=wspace)
            if compress:
                # Re-solve after packing fixed-aspect axes tightly.
                layoutgrids = compress_fixed_aspect(layoutgrids, fig)
                layoutgrids[fig].update_variables()
                if check_no_collapsed_axes(layoutgrids, fig):
                    reposition_axes(layoutgrids, fig, renderer, h_pad=h_pad,
                                    w_pad=w_pad, hspace=hspace, wspace=wspace)
                else:
                    _api.warn_external(warn_collapsed)
        else:
            _api.warn_external(warn_collapsed)
        # Margins are minima; zero them so the next pass (or next draw)
        # can shrink as well as grow.
        reset_margins(layoutgrids, fig)
    return layoutgrids
170,931 | from matplotlib._tight_layout import *
from matplotlib import _api
def get_renderer(fig):
    """Return a renderer for *fig*: its canvas's renderer when available,
    otherwise a default one from the backend machinery."""
    canvas = fig.canvas
    if not (canvas and hasattr(canvas, "get_renderer")):
        # Canvas-less (or renderer-less) figure: fall back to a default
        # renderer supplied by the backend base module.
        from . import backend_bases
        return backend_bases._get_renderer(fig)
    return canvas.get_renderer()
170,932 | import functools
from numbers import Integral
import numpy as np
from numpy import ma
import matplotlib as mpl
from matplotlib import _api, _docstring
from matplotlib.backend_bases import MouseButton
from matplotlib.text import Text
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
class MouseButton(IntEnum):
    """Integer identifiers reported in backend mouse-button events."""
    LEFT = 1
    MIDDLE = 2
    RIGHT = 3
    BACK = 8  # extra (thumb) button; availability is backend/hardware dependent
    FORWARD = 9  # extra (thumb) button; availability is backend/hardware dependent
def _contour_labeler_event_handler(cs, inline, inline_spacing, event):
canvas = cs.axes.figure.canvas
is_button = event.name == "button_press_event"
is_key = event.name == "key_press_event"
# Quit (even if not in infinite mode; this is consistent with
# MATLAB and sometimes quite useful, but will require the user to
# test how many points were actually returned before using data).
if (is_button and event.button == MouseButton.MIDDLE
or is_key and event.key in ["escape", "enter"]):
canvas.stop_event_loop()
# Pop last click.
elif (is_button and event.button == MouseButton.RIGHT
or is_key and event.key in ["backspace", "delete"]):
# Unfortunately, if one is doing inline labels, then there is currently
# no way to fix the broken contour - once humpty-dumpty is broken, he
# can't be put back together. In inline mode, this does nothing.
if not inline:
cs.pop_label()
canvas.draw()
# Add new click.
elif (is_button and event.button == MouseButton.LEFT
# On macOS/gtk, some keys return None.
or is_key and event.key is not None):
if event.inaxes == cs.axes:
cs.add_label_near(event.x, event.y, transform=False,
inline=inline, inline_spacing=inline_spacing)
canvas.draw() | null |
170,933 | import functools
from numbers import Integral
import numpy as np
from numpy import ma
import matplotlib as mpl
from matplotlib import _api, _docstring
from matplotlib.backend_bases import MouseButton
from matplotlib.text import Text
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
The provided code snippet includes necessary dependencies for implementing the `_is_closed_polygon` function. Write a Python function `def _is_closed_polygon(X)` to solve the following problem:
Return whether first and last object in a sequence are the same. These are presumably coordinates on a polygonal curve, in which case this function tests if that curve is closed.
Here is the function:
def _is_closed_polygon(X):
"""
Return whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.allclose(X[0], X[-1], rtol=1e-10, atol=1e-13) | Return whether first and last object in a sequence are the same. These are presumably coordinates on a polygonal curve, in which case this function tests if that curve is closed. |
170,934 | import functools
from numbers import Integral
import numpy as np
from numpy import ma
import matplotlib as mpl
from matplotlib import _api, _docstring
from matplotlib.backend_bases import MouseButton
from matplotlib.text import Text
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.cbook as cbook
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
The provided code snippet includes necessary dependencies for implementing the `_find_closest_point_on_path` function. Write a Python function `def _find_closest_point_on_path(xys, p)` to solve the following problem:
Parameters ---------- xys : (N, 2) array-like Coordinates of vertices. p : (float, float) Coordinates of point. Returns ------- d2min : float Minimum square distance of *p* to *xys*. proj : (float, float) Projection of *p* onto *xys*. imin : (int, int) Consecutive indices of vertices of segment in *xys* where *proj* is. Segments are considered as including their end-points; i.e. if the closest point on the path is a node in *xys* with index *i*, this returns ``(i-1, i)``. For the special case where *xys* is a single point, this returns ``(0, 0)``.
Here is the function:
def _find_closest_point_on_path(xys, p):
"""
Parameters
----------
xys : (N, 2) array-like
Coordinates of vertices.
p : (float, float)
Coordinates of point.
Returns
-------
d2min : float
Minimum square distance of *p* to *xys*.
proj : (float, float)
Projection of *p* onto *xys*.
imin : (int, int)
Consecutive indices of vertices of segment in *xys* where *proj* is.
Segments are considered as including their end-points; i.e. if the
closest point on the path is a node in *xys* with index *i*, this
returns ``(i-1, i)``. For the special case where *xys* is a single
point, this returns ``(0, 0)``.
"""
if len(xys) == 1:
return (((p - xys[0]) ** 2).sum(), xys[0], (0, 0))
dxys = xys[1:] - xys[:-1] # Individual segment vectors.
norms = (dxys ** 2).sum(axis=1)
norms[norms == 0] = 1 # For zero-length segment, replace 0/0 by 0/1.
rel_projs = np.clip( # Project onto each segment in relative 0-1 coords.
((p - xys[:-1]) * dxys).sum(axis=1) / norms,
0, 1)[:, None]
projs = xys[:-1] + rel_projs * dxys # Projs. onto each segment, in (x, y).
d2s = ((projs - p) ** 2).sum(axis=1) # Squared distances.
imin = np.argmin(d2s)
return (d2s[imin], projs[imin], (imin, imin+1)) | Parameters ---------- xys : (N, 2) array-like Coordinates of vertices. p : (float, float) Coordinates of point. Returns ------- d2min : float Minimum square distance of *p* to *xys*. proj : (float, float) Projection of *p* onto *xys*. imin : (int, int) Consecutive indices of vertices of segment in *xys* where *proj* is. Segments are considered as including their end-points; i.e. if the closest point on the path is a node in *xys* with index *i*, this returns ``(i-1, i)``. For the special case where *xys* is a single point, this returns ``(0, 0)``. |
170,935 | import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
class Bbox(BboxBase):
"""
A mutable bounding box.
Examples
--------
**Create from known bounds**
The default constructor takes the boundary "points" ``[[xmin, ymin],
[xmax, ymax]]``.
>>> Bbox([[1, 1], [3, 7]])
Bbox([[1.0, 1.0], [3.0, 7.0]])
Alternatively, a Bbox can be created from the flattened points array, the
so-called "extents" ``(xmin, ymin, xmax, ymax)``
>>> Bbox.from_extents(1, 1, 3, 7)
Bbox([[1.0, 1.0], [3.0, 7.0]])
or from the "bounds" ``(xmin, ymin, width, height)``.
>>> Bbox.from_bounds(1, 1, 2, 6)
Bbox([[1.0, 1.0], [3.0, 7.0]])
**Create from collections of points**
The "empty" object for accumulating Bboxs is the null bbox, which is a
stand-in for the empty set.
>>> Bbox.null()
Bbox([[inf, inf], [-inf, -inf]])
Adding points to the null bbox will give you the bbox of those points.
>>> box = Bbox.null()
>>> box.update_from_data_xy([[1, 1]])
>>> box
Bbox([[1.0, 1.0], [1.0, 1.0]])
>>> box.update_from_data_xy([[2, 3], [3, 2]], ignore=False)
>>> box
Bbox([[1.0, 1.0], [3.0, 3.0]])
Setting ``ignore=True`` is equivalent to starting over from a null bbox.
>>> box.update_from_data_xy([[1, 1]], ignore=True)
>>> box
Bbox([[1.0, 1.0], [1.0, 1.0]])
.. warning::
It is recommended to always specify ``ignore`` explicitly. If not, the
default value of ``ignore`` can be changed at any time by code with
access to your Bbox, for example using the method `~.Bbox.ignore`.
**Properties of the ``null`` bbox**
.. note::
The current behavior of `Bbox.null()` may be surprising as it does
not have all of the properties of the "empty set", and as such does
not behave like a "zero" object in the mathematical sense. We may
change that in the future (with a deprecation period).
The null bbox is the identity for intersections
>>> Bbox.intersection(Bbox([[1, 1], [3, 7]]), Bbox.null())
Bbox([[1.0, 1.0], [3.0, 7.0]])
except with itself, where it returns the full space.
>>> Bbox.intersection(Bbox.null(), Bbox.null())
Bbox([[-inf, -inf], [inf, inf]])
A union containing null will always return the full space (not the other
set!)
>>> Bbox.union([Bbox([[0, 0], [0, 0]]), Bbox.null()])
Bbox([[-inf, -inf], [inf, inf]])
"""
def __init__(self, points, **kwargs):
"""
Parameters
----------
points : `~numpy.ndarray`
A 2x2 numpy array of the form ``[[x0, y0], [x1, y1]]``.
"""
super().__init__(**kwargs)
points = np.asarray(points, float)
if points.shape != (2, 2):
raise ValueError('Bbox points must be of the form '
'"[[x0, y0], [x1, y1]]".')
self._points = points
self._minpos = np.array([np.inf, np.inf])
self._ignore = True
# it is helpful in some contexts to know if the bbox is a
# default or has been mutated; we store the orig points to
# support the mutated methods
self._points_orig = self._points.copy()
if DEBUG:
___init__ = __init__
def __init__(self, points, **kwargs):
self._check(points)
self.___init__(points, **kwargs)
def invalidate(self):
self._check(self._points)
super().invalidate()
def frozen(self):
# docstring inherited
frozen_bbox = super().frozen()
frozen_bbox._minpos = self.minpos.copy()
return frozen_bbox
def unit():
"""Create a new unit `Bbox` from (0, 0) to (1, 1)."""
return Bbox([[0, 0], [1, 1]])
def null():
"""Create a new null `Bbox` from (inf, inf) to (-inf, -inf)."""
return Bbox([[np.inf, np.inf], [-np.inf, -np.inf]])
def from_bounds(x0, y0, width, height):
"""
Create a new `Bbox` from *x0*, *y0*, *width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
def from_extents(*args, minpos=None):
"""
Create a new Bbox from *left*, *bottom*, *right* and *top*.
The *y*-axis increases upwards.
Parameters
----------
left, bottom, right, top : float
The four extents of the bounding box.
minpos : float or None
If this is supplied, the Bbox will have a minimum positive value
set. This is useful when dealing with logarithmic scales and other
scales where negative bounds result in floating point errors.
"""
bbox = Bbox(np.reshape(args, (2, 2)))
if minpos is not None:
bbox._minpos[:] = minpos
return bbox
def __format__(self, fmt):
return (
'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.
format(self, fmt))
def __str__(self):
return format(self, '')
def __repr__(self):
return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data_xy`.
value : bool
- When ``True``, subsequent calls to :meth:`update_from_data_xy`
will ignore the existing bounds of the `Bbox`.
- When ``False``, subsequent calls to :meth:`update_from_data_xy`
will include the existing bounds of the `Bbox`.
"""
self._ignore = value
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the `Bbox` to contain the vertices of the
provided path. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
Parameters
----------
path : `~matplotlib.path.Path`
ignore : bool, optional
- when ``True``, ignore the existing bounds of the `Bbox`.
- when ``False``, include the existing bounds of the `Bbox`.
- when ``None``, use the last value passed to :meth:`ignore`.
updatex, updatey : bool, default: True
When ``True``, update the x/y values.
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:, 0] = points[:, 0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:, 1] = points[:, 1]
self._minpos[1] = minpos[1]
def update_from_data_x(self, x, ignore=None):
"""
Update the x-bounds of the `Bbox` based on the passed in data. After
updating, the bounds will have positive *width*, and *x0* will be the
minimal value.
Parameters
----------
x : `~numpy.ndarray`
Array of x-values.
ignore : bool, optional
- When ``True``, ignore the existing bounds of the `Bbox`.
- When ``False``, include the existing bounds of the `Bbox`.
- When ``None``, use the last value passed to :meth:`ignore`.
"""
x = np.ravel(x)
self.update_from_data_xy(np.column_stack([x, np.ones(x.size)]),
ignore=ignore, updatey=False)
def update_from_data_y(self, y, ignore=None):
"""
Update the y-bounds of the `Bbox` based on the passed in data. After
updating, the bounds will have positive *height*, and *y0* will be the
minimal value.
Parameters
----------
y : `~numpy.ndarray`
Array of y-values.
ignore : bool, optional
- When ``True``, ignore the existing bounds of the `Bbox`.
- When ``False``, include the existing bounds of the `Bbox`.
- When ``None``, use the last value passed to :meth:`ignore`.
"""
y = np.ravel(y)
self.update_from_data_xy(np.column_stack([np.ones(y.size), y]),
ignore=ignore, updatex=False)
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the `Bbox` based on the passed in data. After
updating, the bounds will have positive *width* and *height*;
*x0* and *y0* will be the minimal values.
Parameters
----------
xy : `~numpy.ndarray`
A numpy array of 2D points.
ignore : bool, optional
- When ``True``, ignore the existing bounds of the `Bbox`.
- When ``False``, include the existing bounds of the `Bbox`.
- When ``None``, use the last value passed to :meth:`ignore`.
updatex, updatey : bool, default: True
When ``True``, update the x/y values.
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def x0(self, val):
self._points[0, 0] = val
self.invalidate()
def y0(self, val):
self._points[0, 1] = val
self.invalidate()
def x1(self, val):
self._points[1, 0] = val
self.invalidate()
def y1(self, val):
self._points[1, 1] = val
self.invalidate()
def p0(self, val):
self._points[0] = val
self.invalidate()
def p1(self, val):
self._points[1] = val
self.invalidate()
def intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
def intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
def bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l + w, b + h]], float)
if np.any(self._points != points):
self._points = points
self.invalidate()
def minpos(self):
"""
The minimum positive value in both directions within the Bbox.
This is useful when dealing with logarithmic scales and other scales
where negative bounds result in floating point errors, and will be used
as the minimum extent instead of *p0*.
"""
return self._minpos
def minposx(self):
"""
The minimum positive value in the *x*-direction within the Bbox.
This is useful when dealing with logarithmic scales and other scales
where negative bounds result in floating point errors, and will be used
as the minimum *x*-extent instead of *x0*.
"""
return self._minpos[0]
def minposy(self):
    """
    The minimum positive value in the *y*-direction within the Bbox.
    This is useful when dealing with logarithmic scales and other scales
    where negative bounds result in floating point errors, and will be used
    as the minimum *y*-extent instead of *y0*.
    """
    # y component of the cached _minpos pair.
    return self._minpos[1]
def get_points(self):
    """
    Get the points of the bounding box directly as a numpy array
    of the form: ``[[x0, y0], [x1, y1]]``.
    """
    # Reading the points clears the invalidation counter; the returned
    # array is the live internal storage, not a copy.
    self._invalid = 0
    return self._points
def set_points(self, points):
    """
    Directly set the points of the bounding box from a numpy array of the
    form ``[[x0, y0], [x1, y1]]``.  No error checking is performed; this is
    mainly for internal use.
    """
    # Skip the invalidation entirely when nothing changes.
    if not np.any(self._points != points):
        return
    self._points = points
    self.invalidate()
def set(self, other):
    """
    Set this bounding box from the "frozen" bounds of another `Bbox`.
    """
    new_points = other.get_points()
    if np.any(self._points != new_points):
        self._points = new_points
        self.invalidate()
def mutated(self):
    """Return whether the bbox has changed since init."""
    # Short-circuit: only consult the y-limits if x is unchanged.
    if self.mutatedx():
        return True
    return self.mutatedy()
def mutatedx(self):
    """Return whether the x-limits have changed since init."""
    cur, ref = self._points, self._points_orig
    return cur[0, 0] != ref[0, 0] or cur[1, 0] != ref[1, 0]
def mutatedy(self):
    """Return whether the y-limits have changed since init."""
    cur, ref = self._points, self._points_orig
    return cur[0, 1] != ref[0, 1] or cur[1, 1] != ref[1, 1]
The provided code snippet includes necessary dependencies for implementing the `_compat_get_offset` function. Write a Python function `def _compat_get_offset(meth)` to solve the following problem:
Decorator for the get_offset method of OffsetBox and subclasses, that allows supporting both the new signature (self, bbox, renderer) and the old signature (self, width, height, xdescent, ydescent, renderer).
Here is the function:
def _compat_get_offset(meth):
    """
    Decorator for the get_offset method of OffsetBox and subclasses, that
    allows supporting both the new signature (self, bbox, renderer) and the old
    signature (self, width, height, xdescent, ydescent, renderer).
    """
    # Two reference signatures; _api.select_matching_signature picks whichever
    # one matches the actual call and returns its bound arguments as a dict.
    sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(),
            lambda self, bbox, renderer: locals()]
    @functools.wraps(meth)
    def get_offset(self, *args, **kwargs):
        params = _api.select_matching_signature(sigs, self, *args, **kwargs)
        # Normalize the old-style (width, height, xdescent, ydescent) call
        # into a Bbox so *meth* only ever sees the new-style arguments.
        bbox = (params["bbox"] if "bbox" in params else
                Bbox.from_bounds(-params["xdescent"], -params["ydescent"],
                                 params["width"], params["height"]))
        return meth(params["self"], bbox, params["renderer"])
    return get_offset
170,936 | import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
DEBUG = False  # When True, bbox_artist below forwards to patches.bbox_artist.
def bbox_artist(*args, **kwargs):
    # Debug helper: forwards to `.patches.bbox_artist` only when the
    # module-level DEBUG flag is set; otherwise a no-op.
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
170,937 | import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
DEBUG = False  # When True, _bbox_artist below forwards to patches.bbox_artist.
def _bbox_artist(*args, **kwargs):
    # Debug helper: forwards to `.patches.bbox_artist` only when the
    # module-level DEBUG flag is set; otherwise a no-op.
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
170,938 | import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
The provided code snippet includes necessary dependencies for implementing the `_get_packed_offsets` function. Write a Python function `def _get_packed_offsets(widths, total, sep, mode="fixed")` to solve the following problem:
r""" Pack boxes specified by their *widths*. For simplicity of the description, the terminology used here assumes a horizontal layout, but the function works equally for a vertical layout. There are three packing *mode*\s: - 'fixed': The elements are packed tight to the left with a spacing of *sep* in between. If *total* is *None* the returned total will be the right edge of the last box. A non-*None* total will be passed unchecked to the output. In particular this means that right edge of the last box may be further to the right than the returned total. - 'expand': Distribute the boxes with equal spacing so that the left edge of the first box is at 0, and the right edge of the last box is at *total*. The parameter *sep* is ignored in this mode. A total of *None* is accepted and considered equal to 1. The total is returned unchanged (except for the conversion *None* to 1). If the total is smaller than the sum of the widths, the laid out boxes will overlap. - 'equal': If *total* is given, the total space is divided in N equal ranges and each box is left-aligned within its subspace. Otherwise (*total* is *None*), *sep* must be provided and each box is left-aligned in its subspace of width ``(max(widths) + sep)``. The total width is then calculated to be ``N * (max(widths) + sep)``. Parameters ---------- widths : list of float Widths of boxes to be packed. total : float or None Intended total length. *None* if not used. sep : float Spacing between boxes. mode : {'fixed', 'expand', 'equal'} The packing mode. Returns ------- total : float The total width needed to accommodate the laid out boxes. offsets : array of float The left offsets of the boxes.
Here is the function:
def _get_packed_offsets(widths, total, sep, mode="fixed"):
    r"""
    Pack boxes specified by their *widths* (terminology assumes a horizontal
    layout, but the function is orientation-agnostic).

    Packing *mode*\s:

    - 'fixed': boxes are packed tight to the left with a spacing of *sep*.
      If *total* is *None* the returned total is the right edge of the last
      box; a non-*None* total is passed through unchecked (so the last box
      may extend beyond it).
    - 'expand': boxes are distributed with equal spacing so the first box
      starts at 0 and the last ends at *total*.  *sep* is ignored.  A *total*
      of *None* is treated as 1 and returned as such.  If *total* is smaller
      than the sum of widths, boxes will overlap.
    - 'equal': the total space is divided into N equal slots with each box
      left-aligned in its slot.  If *total* is *None*, *sep* must be given
      and each slot is ``max(widths) + sep`` wide, making the total
      ``N * (max(widths) + sep)``.

    Parameters
    ----------
    widths : list of float
        Widths of boxes to be packed.
    total : float or None
        Intended total length. *None* if not used.
    sep : float
        Spacing between boxes.
    mode : {'fixed', 'expand', 'equal'}
        The packing mode.

    Returns
    -------
    total : float
        The total width needed to accommodate the laid out boxes.
    offsets : array of float
        The left offsets of the boxes.
    """
    _api.check_in_list(["fixed", "expand", "equal"], mode=mode)
    if mode == "fixed":
        # Right edges of the gaps; drop the final entry to get left offsets.
        edges = np.cumsum([0, *(w + sep for w in widths)])
        if total is None:
            total = edges[-1] - sep
        return total, edges[:-1]
    elif mode == "expand":
        # Guard against a None *total* coming from tight layout.
        if total is None:
            total = 1
        # Recompute the gap so the boxes exactly span *total*.
        gap = ((total - sum(widths)) / (len(widths) - 1)
               if len(widths) > 1 else 0)
        edges = np.cumsum([0, *(w + gap for w in widths)])
        return total, edges[:-1]
    elif mode == "equal":
        max_width = max(widths)
        if total is None:
            if sep is None:
                raise ValueError("total and sep cannot both be None when "
                                 "using layout mode 'equal'")
            total = (max_width + sep) * len(widths)
        else:
            sep = total / len(widths) - max_width
        return total, (max_width + sep) * np.arange(len(widths))
170,939 | import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
The provided code snippet includes necessary dependencies for implementing the `_get_aligned_offsets` function. Write a Python function `def _get_aligned_offsets(yspans, height, align="baseline")` to solve the following problem:
Align boxes each specified by their ``(y0, y1)`` spans. For simplicity of the description, the terminology used here assumes a horizontal layout (i.e., vertical alignment), but the function works equally for a vertical layout. Parameters ---------- yspans List of (y0, y1) spans of boxes to be aligned. height : float or None Intended total height. If None, the maximum of the heights (``y1 - y0``) in *yspans* is used. align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'} The alignment anchor of the boxes. Returns ------- (y0, y1) y range spanned by the packing. If a *height* was originally passed in, then for all alignments other than "baseline", a span of ``(0, height)`` is used without checking that it is actually large enough). descent The descent of the packing. offsets The bottom offsets of the boxes.
Here is the function:
def _get_aligned_offsets(yspans, height, align="baseline"):
    """
    Align boxes given as ``(y0, y1)`` spans (terminology assumes a horizontal
    layout / vertical alignment, but the function is orientation-agnostic).

    Parameters
    ----------
    yspans
        List of (y0, y1) spans of the boxes to align.
    height : float or None
        Intended total extent; if None, the tallest span (``y1 - y0``)
        is used.
    align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'}
        The alignment anchor of the boxes.

    Returns
    -------
    (y0, y1)
        The span covered by the packing.  For all alignments other than
        "baseline" this is ``(0, height)`` (without checking that the boxes
        actually fit).
    offsets
        The bottom offsets of the individual boxes.
    """
    _api.check_in_list(
        ["baseline", "left", "top", "right", "bottom", "center"], align=align)
    if height is None:
        height = max(hi - lo for lo, hi in yspans)
    if align == "baseline":
        # Keep every box at its own baseline; the packing span is simply the
        # union of all spans.
        span = (min(lo for lo, hi in yspans), max(hi for lo, hi in yspans))
        return span, [0] * len(yspans)
    if align in ("left", "bottom"):
        offsets = [-lo for lo, hi in yspans]
    elif align in ("right", "top"):
        offsets = [height - hi for lo, hi in yspans]
    else:  # "center"
        offsets = [(height - (hi - lo)) / 2 - lo for lo, hi in yspans]
    return (0, height), offsets
170,940 | import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
The provided code snippet includes necessary dependencies for implementing the `_get_anchored_bbox` function. Write a Python function `def _get_anchored_bbox(loc, bbox, parentbbox, borderpad)` to solve the following problem:
Return the (x, y) position of the *bbox* anchored at the *parentbbox* with the *loc* code with the *borderpad*.
Here is the function:
def _get_anchored_bbox(loc, bbox, parentbbox, borderpad):
    """
    Return the (x, y) position of *bbox* anchored inside *parentbbox*
    according to the location code *loc*, inset by *borderpad*.
    """
    # *loc* is assumed pre-validated by the caller; index 0 (None) makes
    # ``bbox.anchored`` raise.
    anchor_codes = [None, "NE", "NW", "SW", "SE", "E", "W", "E", "S", "N", "C"]
    inset = parentbbox.padded(-borderpad)
    return bbox.anchored(anchor_codes[loc], container=inset).p0
170,941 | import math
import types
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook
from matplotlib.axes import Axes
import matplotlib.axis as maxis
import matplotlib.markers as mmarkers
import matplotlib.patches as mpatches
from matplotlib.path import Path
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
from matplotlib.spines import Spine
The provided code snippet includes necessary dependencies for implementing the `_is_full_circle_deg` function. Write a Python function `def _is_full_circle_deg(thetamin, thetamax)` to solve the following problem:
Determine if a wedge (in degrees) spans the full circle. The condition is derived from :class:`~matplotlib.patches.Wedge`.
Here is the function:
def _is_full_circle_deg(thetamin, thetamax):
"""
Determine if a wedge (in degrees) spans the full circle.
The condition is derived from :class:`~matplotlib.patches.Wedge`.
"""
return abs(abs(thetamax - thetamin) - 360.0) < 1e-12 | Determine if a wedge (in degrees) spans the full circle. The condition is derived from :class:`~matplotlib.patches.Wedge`. |
170,942 | import math
import types
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook
from matplotlib.axes import Axes
import matplotlib.axis as maxis
import matplotlib.markers as mmarkers
import matplotlib.patches as mpatches
from matplotlib.path import Path
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
from matplotlib.spines import Spine
The provided code snippet includes necessary dependencies for implementing the `_is_full_circle_rad` function. Write a Python function `def _is_full_circle_rad(thetamin, thetamax)` to solve the following problem:
Determine if a wedge (in radians) spans the full circle. The condition is derived from :class:`~matplotlib.patches.Wedge`.
Here is the function:
def _is_full_circle_rad(thetamin, thetamax):
"""
Determine if a wedge (in radians) spans the full circle.
The condition is derived from :class:`~matplotlib.patches.Wedge`.
"""
return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14 | Determine if a wedge (in radians) spans the full circle. The condition is derived from :class:`~matplotlib.patches.Wedge`. |
170,943 | from collections import namedtuple
import logging
import re
from ._mathtext_data import uni2type1
_log = logging.getLogger(__name__)  # Module-level logger.
def _to_int(x):
    """Convert an AFM numeric field to int, accepting float-formatted values."""
    # Some AFM files have floats where we are expecting ints -- there is
    # probably a better way to handle this (support floats, round rather than
    # truncate). But I don't know what the best approach is now and this
    # change to _to_int should at least prevent Matplotlib from crashing on
    # these. JDH (2009-11-06)
    return int(float(x))
def _to_float(x):
# Some AFM files use "," instead of "." as decimal separator -- this
# shouldn't be ambiguous (unless someone is wicked enough to use "," as
# thousands separator...).
if isinstance(x, bytes):
# Encoding doesn't really matter -- if we have codepoints >127 the call
# to float() will error anyways.
x = x.decode('latin-1')
return float(x.replace(',', '.'))
def _to_str(x):
    """Decode a bytes field as utf8 (permissive: the AFM spec requires ASCII)."""
    return x.decode('utf8')
def _to_list_of_ints(s):
    """Parse a bytes field of comma- and/or space-separated ints into a list."""
    # Treat commas as whitespace so a single split handles both separators.
    return [_to_int(chunk) for chunk in s.replace(b',', b' ').split()]
def _to_bool(s):
if s.lower().strip() in (b'false', b'0', b'no'):
return False
else:
return True
The provided code snippet includes necessary dependencies for implementing the `_parse_header` function. Write a Python function `def _parse_header(fh)` to solve the following problem:
Read the font metrics header (up to the char metrics) and returns a dictionary mapping *key* to *val*. *val* will be converted to the appropriate python type as necessary; e.g.: * 'False'->False * '0'->0 * '-168 -218 1000 898'-> [-168, -218, 1000, 898] Dictionary keys are StartFontMetrics, FontName, FullName, FamilyName, Weight, ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition, UnderlineThickness, Version, Notice, EncodingScheme, CapHeight, XHeight, Ascender, Descender, StartCharMetrics
Here is the function:
def _parse_header(fh):
    """
    Read the font metrics header (up to the char metrics) and returns
    a dictionary mapping *key* to *val*. *val* will be converted to the
    appropriate python type as necessary; e.g.:
    * 'False'->False
    * '0'->0
    * '-168 -218 1000 898'-> [-168, -218, 1000, 898]
    Dictionary keys are
    StartFontMetrics, FontName, FullName, FamilyName, Weight,
    ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition,
    UnderlineThickness, Version, Notice, EncodingScheme, CapHeight,
    XHeight, Ascender, Descender, StartCharMetrics
    """
    # Maps header keyword -> converter used to coerce its raw bytes value.
    header_converters = {
        b'StartFontMetrics': _to_float,
        b'FontName': _to_str,
        b'FullName': _to_str,
        b'FamilyName': _to_str,
        b'Weight': _to_str,
        b'ItalicAngle': _to_float,
        b'IsFixedPitch': _to_bool,
        b'FontBBox': _to_list_of_ints,
        b'UnderlinePosition': _to_float,
        b'UnderlineThickness': _to_float,
        b'Version': _to_str,
        # Some AFM files have non-ASCII characters (which are not allowed by
        # the spec). Given that there is actually no public API to even access
        # this field, just return it as straight bytes.
        b'Notice': lambda x: x,
        b'EncodingScheme': _to_str,
        b'CapHeight': _to_float,  # Is the second version a mistake, or
        b'Capheight': _to_float,  # do some AFM files contain 'Capheight'? -JKS
        b'XHeight': _to_float,
        b'Ascender': _to_float,
        b'Descender': _to_float,
        b'StdHW': _to_float,
        b'StdVW': _to_float,
        b'StartCharMetrics': _to_int,
        b'CharacterSet': _to_str,
        b'Characters': _to_int,
    }
    d = {}
    first_line = True
    for line in fh:
        line = line.rstrip()
        if line.startswith(b'Comment'):
            continue
        lst = line.split(b' ', 1)
        key = lst[0]
        if first_line:
            # AFM spec, Section 4: The StartFontMetrics keyword
            # [followed by a version number] must be the first line in
            # the file, and the EndFontMetrics keyword must be the
            # last non-empty line in the file. We just check the
            # first header entry.
            if key != b'StartFontMetrics':
                raise RuntimeError('Not an AFM file')
            first_line = False
        # Keywords may legitimately appear without a value.
        val = lst[1] if len(lst) == 2 else b''
        try:
            converter = header_converters[key]
        except KeyError:
            # Use lazy %-style logging arguments (consistent with the
            # ValueError branch below) instead of eager interpolation.
            _log.error('Found an unknown keyword in AFM header (was %r)', key)
            continue
        try:
            d[key] = converter(val)
        except ValueError:
            _log.error('Value error parsing header in AFM: %s, %s', key, val)
            continue
        if key == b'StartCharMetrics':
            # The caller takes over parsing from here.
            break
    else:
        # Never saw StartCharMetrics -> malformed header.
        raise RuntimeError('Bad parse')
    return d
170,944 | from collections import namedtuple
import logging
import re
from ._mathtext_data import uni2type1
def _to_int(x):
    """Convert an AFM numeric field to int, accepting float-formatted values."""
    # Some AFM files have floats where we are expecting ints -- there is
    # probably a better way to handle this (support floats, round rather than
    # truncate). But I don't know what the best approach is now and this
    # change to _to_int should at least prevent Matplotlib from crashing on
    # these. JDH (2009-11-06)
    return int(float(x))
def _to_float(x):
    """Convert an AFM numeric field to float, tolerating ',' decimal points."""
    # Some AFM files use "," instead of "." as decimal separator -- this
    # shouldn't be ambiguous (unless someone is wicked enough to use "," as
    # thousands separator...).
    if isinstance(x, bytes):
        # Encoding doesn't really matter -- if we have codepoints >127 the call
        # to float() will error anyways.
        x = x.decode('latin-1')
    return float(x.replace(',', '.'))
def _to_str(x):
    """Decode a bytes field as utf8."""
    return x.decode('utf8')
def _to_list_of_floats(s):
    """Split a whitespace-separated field into a list of floats."""
    return [_to_float(val) for val in s.split()]
CharMetrics = namedtuple('CharMetrics', 'width, name, bbox')
CharMetrics.__doc__ = """
Represents the character metrics of a single character.
Notes
-----
The fields do currently only describe a subset of character metrics
information defined in the AFM standard.
"""
CharMetrics.width.__doc__ = """The character width (WX)."""
CharMetrics.name.__doc__ = """The character name (N)."""
CharMetrics.bbox.__doc__ = """
The bbox of the character (B) as a tuple (*llx*, *lly*, *urx*, *ury*)."""
The provided code snippet includes necessary dependencies for implementing the `_parse_char_metrics` function. Write a Python function `def _parse_char_metrics(fh)` to solve the following problem:
Parse the given filehandle for character metrics information and return the information as dicts. It is assumed that the file cursor is on the line behind 'StartCharMetrics'. Returns ------- ascii_d : dict A mapping "ASCII num of the character" to `.CharMetrics`. name_d : dict A mapping "character name" to `.CharMetrics`. Notes ----- This function is incomplete per the standard, but thus far parses all the sample afm files tried.
Here is the function:
def _parse_char_metrics(fh):
    """
    Parse character metric lines from *fh*, whose cursor must be on the line
    just past 'StartCharMetrics'.

    Returns
    -------
    ascii_d : dict
        Maps the character's ASCII code to its `.CharMetrics`.
    name_d : dict
        Maps the character's name to its `.CharMetrics`.

    Notes
    -----
    Only the C, WX, N and B fields are interpreted; incomplete per the
    standard, but sufficient for every sample AFM file tried so far.
    """
    needed = {'C', 'WX', 'N', 'B'}
    by_code = {}
    by_name = {}
    for raw in fh:
        # Decode permissively as utf8: the spec requires ASCII, but
        # non-compliant fonts are in circulation.
        line = _to_str(raw.rstrip())
        if line.startswith('EndCharMetrics'):
            return by_code, by_name
        # Each metric line is a ';'-separated list of "<key> <value>" pairs.
        fields = dict(entry.strip().split(' ', 1)
                      for entry in line.split(';') if entry)
        # Other keys may be present, but only the required ones are used.
        if not needed.issubset(fields):
            raise RuntimeError('Bad char metrics line: %s' % line)
        code = _to_int(fields['C'])
        width = _to_float(fields['WX'])
        name = fields['N']
        bbox = [int(v) for v in _to_list_of_floats(fields['B'])]
        metrics = CharMetrics(width, name, bbox)
        # Workaround: give 'Euro' its WinAnsiEncoding code point, and map
        # 'minus' onto U+2212 (see PDF Reference).
        if name == 'Euro':
            code = 128
        elif name == 'minus':
            code = ord("\N{MINUS SIGN}")  # 0x2212
        if code != -1:
            by_code[code] = metrics
        by_name[name] = metrics
    raise RuntimeError('Bad parse')
170,945 | from collections import namedtuple
import logging
import re
from ._mathtext_data import uni2type1
def _parse_kern_pairs(fh):
    """
    Return a kern-pair dictionary mapping (*char1*, *char2*) name tuples to
    the kerning amount; e.g. the line ``KPX A y -50`` yields
    ``d[('A', 'y')] = -50``.
    """
    header = next(fh)
    if not header.startswith(b'StartKernPairs'):
        raise RuntimeError('Bad start of kern pairs data: %s' % header)
    pairs = {}
    for raw in fh:
        raw = raw.rstrip()
        if not raw:
            continue
        if raw.startswith(b'EndKernPairs'):
            next(fh)  # Consume the trailing EndKernData line.
            return pairs
        parts = raw.split()
        if len(parts) != 4 or parts[0] != b'KPX':
            raise RuntimeError('Bad kern pairs line: %s' % raw)
        pairs[_to_str(parts[1]), _to_str(parts[2])] = _to_float(parts[3])
    raise RuntimeError('Bad kern pairs parse')
def _parse_composites(fh):
    """
    Parse composite-character definitions from *fh* (cursor just past
    'StartComposites') into a dict mapping composite names to lists of
    `.CompositePart`.

    Examples
    --------
    A composite definition line::

        CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ;

    will be represented as::

        composites['Aacute'] = [CompositePart(name='A', dx=0, dy=0),
                                CompositePart(name='acute', dx=160, dy=170)]
    """
    composites = {}
    for raw in fh:
        raw = raw.rstrip()
        if not raw:
            continue
        if raw.startswith(b'EndComposites'):
            return composites
        chunks = raw.split(b';')
        cc_fields = chunks[0].split()
        name = cc_fields[1]
        _to_int(cc_fields[2])  # Declared part count; parsed but unused.
        # chunks[1:-1] are the "PCC <name> <dx> <dy>" entries; the final
        # chunk is the empty remainder after the trailing ';'.
        parts = [CompositePart(pcc[1], _to_float(pcc[2]), _to_float(pcc[3]))
                 for pcc in (chunk.split() for chunk in chunks[1:-1])]
        composites[name] = parts
    raise RuntimeError('Bad composites parse')
The provided code snippet includes necessary dependencies for implementing the `_parse_optional` function. Write a Python function `def _parse_optional(fh)` to solve the following problem:
Parse the optional fields for kern pair data and composites. Returns ------- kern_data : dict A dict containing kerning information. May be empty. See `._parse_kern_pairs`. composites : dict A dict containing composite information. May be empty. See `._parse_composites`.
Here is the function:
def _parse_optional(fh):
    """
    Parse the optional kern-pair and composite sections, if present.

    Returns
    -------
    kern_data : dict
        Kerning information; may be empty.  See `._parse_kern_pairs`.
    composites : dict
        Composite information; may be empty.  See `._parse_composites`.
    """
    # Default to empty dicts so absent sections still yield results.
    results = {b'StartKernData': {}, b'StartComposites': {}}
    parsers = {b'StartKernData': _parse_kern_pairs,
               b'StartComposites': _parse_composites}
    for raw in fh:
        raw = raw.rstrip()
        if not raw:
            continue
        keyword = raw.split()[0]
        parser = parsers.get(keyword)
        if parser is not None:
            results[keyword] = parser(fh)
    return results[b'StartKernData'], results[b'StartComposites']
170,946 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
The provided code snippet includes necessary dependencies for implementing the `_make_str_method` function. Write a Python function `def _make_str_method(*args, **kwargs)` to solve the following problem:
Generate a ``__str__`` method for a `.Transform` subclass. After :: class T: __str__ = _make_str_method("attr", key="other") ``str(T(...))`` will be .. code-block:: text {type(T).__name__}( {self.attr}, key={self.other})
Here is the function:
def _make_str_method(*args, **kwargs):
"""
Generate a ``__str__`` method for a `.Transform` subclass.
After ::
class T:
__str__ = _make_str_method("attr", key="other")
``str(T(...))`` will be
.. code-block:: text
{type(T).__name__}(
{self.attr},
key={self.other})
"""
indent = functools.partial(textwrap.indent, prefix=" " * 4)
def strrepr(x): return repr(x) if isinstance(x, str) else str(x)
return lambda self: (
type(self).__name__ + "("
+ ",".join([*(indent("\n" + strrepr(getattr(self, arg)))
for arg in args),
*(indent("\n" + k + "=" + strrepr(getattr(self, arg)))
for k, arg in kwargs.items())])
+ ")") | Generate a ``__str__`` method for a `.Transform` subclass. After :: class T: __str__ = _make_str_method("attr", key="other") ``str(T(...))`` will be .. code-block:: text {type(T).__name__}( {self.attr}, key={self.other}) |
170,947 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
class Affine2DBase(AffineBase):
    """
    The base class of all 2D affine transformations.
    2D affine transformations are performed using a 3x3 numpy array::
        a c e
        b d f
        0 0 1
    This class provides the read-only interface. For a mutable 2D
    affine transformation, use `Affine2D`.
    Subclasses of this class will generally only need to override a
    constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
    """
    input_dims = 2
    output_dims = 2
    def frozen(self):
        # docstring inherited
        # Copy the matrix so later mutations of self don't leak into the
        # frozen snapshot.
        return Affine2D(self.get_matrix().copy())
    def is_separable(self):
        # x and y are independent iff the off-diagonal (shear) terms are 0.
        # NOTE(review): looks like a stripped @property (sibling classes
        # define is_separable as a class attribute) -- confirm upstream.
        mtx = self.get_matrix()
        return mtx[0, 1] == mtx[1, 0] == 0.0
    def to_values(self):
        """
        Return the values of the matrix as an ``(a, b, c, d, e, f)`` tuple.
        """
        # Column-major flattening of the top two rows gives (a, b, c, d, e, f).
        mtx = self.get_matrix()
        return tuple(mtx[:2].swapaxes(0, 1).flat)
    def transform_affine(self, points):
        mtx = self.get_matrix()
        # Preserve masked-array-ness: transform the raw data and re-wrap
        # with the original mask.
        if isinstance(points, np.ma.MaskedArray):
            tpoints = affine_transform(points.data, mtx)
            return np.ma.MaskedArray(tpoints, mask=np.ma.getmask(points))
        return affine_transform(points, mtx)
    if DEBUG:
        # In debug builds, wrap transform_affine to warn about non-ndarray
        # inputs, which force a slow conversion on every call.
        _transform_affine = transform_affine
        def transform_affine(self, points):
            # docstring inherited
            # The major speed trap here is just converting to the
            # points to an array in the first place. If we can use
            # more arrays upstream, that should help here.
            if not isinstance(points, np.ndarray):
                _api.warn_external(
                    f'A non-numpy array of type {type(points)} was passed in '
                    f'for transformation, which results in poor performance.')
            return self._transform_affine(points)
    def inverted(self):
        # docstring inherited
        # Cache the inverse; recompute only after an invalidation.
        if self._inverted is None or self._invalid:
            mtx = self.get_matrix()
            shorthand_name = None
            if self._shorthand_name:
                shorthand_name = '(%s)-1' % self._shorthand_name
            self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
            self._invalid = 0
        return self._inverted
class BlendedGenericTransform(_BlendedMixin, Transform):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.
    This "generic" version can handle any given child transform in the
    *x*- and *y*-directions.
    """
    input_dims = 2
    output_dims = 2
    is_separable = True
    pass_through = True
    def __init__(self, x_transform, y_transform, **kwargs):
        """
        Create a new "blended" transform using *x_transform* to transform the
        *x*-axis and *y_transform* to transform the *y*-axis.
        You will generally not call this constructor directly but use the
        `blended_transform_factory` function instead, which can determine
        automatically which kind of blended transform to create.
        """
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        # Cached affine part; rebuilt lazily in get_affine().
        self._affine = None
    def depth(self):
        # NOTE(review): looks like a stripped @property (returns a value,
        # takes no args; CompositeGenericTransform defines depth as a
        # property) -- confirm upstream.
        return max(self._x.depth, self._y.depth)
    def contains_branch(self, other):
        # A blended transform cannot possibly contain a branch from two
        # different transforms.
        return False
    is_affine = property(lambda self: self._x.is_affine and self._y.is_affine)
    has_inverse = property(
        lambda self: self._x.has_inverse and self._y.has_inverse)
    def frozen(self):
        # docstring inherited
        return blended_transform_factory(self._x.frozen(), self._y.frozen())
    def transform_non_affine(self, points):
        # docstring inherited
        if self._x.is_affine and self._y.is_affine:
            return points
        x = self._x
        y = self._y
        # If both axes share one 2D transform, apply it once.
        if x == y and x.input_dims == 2:
            return x.transform_non_affine(points)
        # A 2D child transforms the full Nx2 array and we keep one column;
        # a 1D child gets just its coordinate and is reshaped to a column.
        if x.input_dims == 2:
            x_points = x.transform_non_affine(points)[:, 0:1]
        else:
            x_points = x.transform_non_affine(points[:, 0])
            x_points = x_points.reshape((len(x_points), 1))
        if y.input_dims == 2:
            y_points = y.transform_non_affine(points)[:, 1:]
        else:
            y_points = y.transform_non_affine(points[:, 1])
            y_points = y_points.reshape((len(y_points), 1))
        # Keep masked-array-ness if either column carries a mask.
        if (isinstance(x_points, np.ma.MaskedArray) or
                isinstance(y_points, np.ma.MaskedArray)):
            return np.ma.concatenate((x_points, y_points), 1)
        else:
            return np.concatenate((x_points, y_points), 1)
    def inverted(self):
        # docstring inherited
        return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
    def get_affine(self):
        # docstring inherited
        # Rebuild the cached affine only when invalidated or first used.
        if self._invalid or self._affine is None:
            if self._x == self._y:
                self._affine = self._x.get_affine()
            else:
                x_mtx = self._x.get_affine().get_matrix()
                y_mtx = self._y.get_affine().get_matrix()
                # We already know the transforms are separable, so we can skip
                # setting b and c to zero.
                mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])
                self._affine = Affine2D(mtx)
            self._invalid = 0
        return self._affine
class BlendedAffine2D(_BlendedMixin, Affine2DBase):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.
    This version is an optimization for the case where both child
    transforms are of type `Affine2DBase`.
    """
    is_separable = True
    def __init__(self, x_transform, y_transform, **kwargs):
        """
        Create a new "blended" transform using *x_transform* to transform the
        *x*-axis and *y_transform* to transform the *y*-axis.
        Both *x_transform* and *y_transform* must be 2D affine transforms.
        You will generally not call this constructor directly but use the
        `blended_transform_factory` function instead, which can determine
        automatically which kind of blended transform to create.
        """
        is_affine = x_transform.is_affine and y_transform.is_affine
        is_separable = x_transform.is_separable and y_transform.is_separable
        is_correct = is_affine and is_separable
        if not is_correct:
            raise ValueError("Both *x_transform* and *y_transform* must be 2D "
                             "affine transforms")
        # Both base-class initializers run: Transform wires up children /
        # kwargs, Affine2DBase sets up the affine caching state.
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        Affine2DBase.__init__(self)
        # Cached blended matrix; rebuilt lazily in get_matrix().
        self._mtx = None
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            if self._x == self._y:
                self._mtx = self._x.get_matrix()
            else:
                # Row 0 (x terms) from the x child, row 1 (y terms) from
                # the y child.
                x_mtx = self._x.get_matrix()
                y_mtx = self._y.get_matrix()
                # We already know the transforms are separable, so we can skip
                # setting b and c to zero.
                self._mtx = np.array([x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]])
            self._inverted = None
            self._invalid = 0
        return self._mtx
The provided code snippet includes necessary dependencies for implementing the `blended_transform_factory` function. Write a Python function `def blended_transform_factory(x_transform, y_transform)` to solve the following problem:
Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. A faster version of the blended transform is returned for the case where both child transforms are affine.
Here is the function:
def blended_transform_factory(x_transform, y_transform):
    """
    Create a new "blended" transform using *x_transform* to transform
    the *x*-axis and *y_transform* to transform the *y*-axis.
    A faster version of the blended transform is returned for the case
    where both child transforms are affine.
    """
    both_affine = (isinstance(x_transform, Affine2DBase)
                   and isinstance(y_transform, Affine2DBase))
    # The all-affine case gets the optimized implementation.
    if both_affine:
        return BlendedAffine2D(x_transform, y_transform)
    return BlendedGenericTransform(x_transform, y_transform)
170,948 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
class Affine2D(Affine2DBase):
    """
    A mutable 2D affine transformation.
    """
    def __init__(self, matrix=None, **kwargs):
        """
        Initialize an Affine transform from a 3x3 numpy float array::
            a c e
            b d f
            0 0 1
        If *matrix* is None, initialize with the identity transform.
        """
        super().__init__(**kwargs)
        if matrix is None:
            # A bit faster than np.identity(3).
            matrix = IdentityTransform._mtx
        self._mtx = matrix.copy()
        self._invalid = 0
    _base_str = _make_str_method("_mtx")
    def __str__(self):
        # Use a compact ``Affine2D().scale(...)`` form for pure scaling
        # matrices; fall back to printing the full matrix otherwise.
        return (self._base_str()
                if (self._mtx != np.diag(np.diag(self._mtx))).any()
                else f"Affine2D().scale({self._mtx[0, 0]}, {self._mtx[1, 1]})"
                if self._mtx[0, 0] != self._mtx[1, 1]
                else f"Affine2D().scale({self._mtx[0, 0]})")
    def from_values(a, b, c, d, e, f):
        """
        Create a new Affine2D instance from the given values::
            a c e
            b d f
            0 0 1
        .
        """
        # NOTE(review): takes no *self* -- presumably a stripped
        # @staticmethod; confirm upstream.
        return Affine2D(
            np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], float).reshape((3, 3)))
    def get_matrix(self):
        """
        Get the underlying transformation matrix as a 3x3 numpy array::
            a c e
            b d f
            0 0 1
        .
        """
        # Drop the cached inverse after an invalidation; it is recomputed
        # lazily by inverted().
        if self._invalid:
            self._inverted = None
            self._invalid = 0
        return self._mtx
    def set_matrix(self, mtx):
        """
        Set the underlying transformation matrix from a 3x3 numpy array::
            a c e
            b d f
            0 0 1
        .
        """
        self._mtx = mtx
        self.invalidate()
    def set(self, other):
        """
        Set this transformation from the frozen copy of another
        `Affine2DBase` object.
        """
        _api.check_isinstance(Affine2DBase, other=other)
        self._mtx = other.get_matrix()
        self.invalidate()
    def identity():
        """
        Return a new `Affine2D` object that is the identity transform.
        Unless this transform will be mutated later on, consider using
        the faster `IdentityTransform` class instead.
        """
        # NOTE(review): takes no *self* -- presumably a stripped
        # @staticmethod; confirm upstream.
        return Affine2D()
    def clear(self):
        """
        Reset the underlying matrix to the identity transform.
        """
        # A bit faster than np.identity(3).
        self._mtx = IdentityTransform._mtx.copy()
        self.invalidate()
        return self
    def rotate(self, theta):
        """
        Add a rotation (in radians) to this transform in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        a = math.cos(theta)
        b = math.sin(theta)
        mtx = self._mtx
        # Operating and assigning one scalar at a time is much faster.
        (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
        # mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx
        mtx[0, 0] = a * xx - b * yx
        mtx[0, 1] = a * xy - b * yy
        mtx[0, 2] = a * x0 - b * y0
        mtx[1, 0] = b * xx + a * yx
        mtx[1, 1] = b * xy + a * yy
        mtx[1, 2] = b * x0 + a * y0
        self.invalidate()
        return self
    def rotate_deg(self, degrees):
        """
        Add a rotation (in degrees) to this transform in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.rotate(math.radians(degrees))
    def rotate_around(self, x, y, theta):
        """
        Add a rotation (in radians) around the point (x, y) in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.translate(-x, -y).rotate(theta).translate(x, y)
    def rotate_deg_around(self, x, y, degrees):
        """
        Add a rotation (in degrees) around the point (x, y) in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        # Cast to float to avoid wraparound issues with uint8's
        x, y = float(x), float(y)
        return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
    def translate(self, tx, ty):
        """
        Add a translation in place.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        self._mtx[0, 2] += tx
        self._mtx[1, 2] += ty
        self.invalidate()
        return self
    def scale(self, sx, sy=None):
        """
        Add a scale in place.
        If *sy* is None, the same scale is applied in both the *x*- and
        *y*-directions.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        if sy is None:
            sy = sx
        # explicit element-wise scaling is fastest
        self._mtx[0, 0] *= sx
        self._mtx[0, 1] *= sx
        self._mtx[0, 2] *= sx
        self._mtx[1, 0] *= sy
        self._mtx[1, 1] *= sy
        self._mtx[1, 2] *= sy
        self.invalidate()
        return self
    def skew(self, xShear, yShear):
        """
        Add a skew in place.
        *xShear* and *yShear* are the shear angles along the *x*- and
        *y*-axes, respectively, in radians.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        rx = math.tan(xShear)
        ry = math.tan(yShear)
        mtx = self._mtx
        # Operating and assigning one scalar at a time is much faster.
        (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
        # mtx = [[1 rx 0], [ry 1 0], [0 0 1]] * mtx
        mtx[0, 0] += rx * yx
        mtx[0, 1] += rx * yy
        mtx[0, 2] += rx * y0
        mtx[1, 0] += ry * xx
        mtx[1, 1] += ry * xy
        mtx[1, 2] += ry * x0
        self.invalidate()
        return self
    def skew_deg(self, xShear, yShear):
        """
        Add a skew in place.
        *xShear* and *yShear* are the shear angles along the *x*- and
        *y*-axes, respectively, in degrees.
        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.skew(math.radians(xShear), math.radians(yShear))
class IdentityTransform(Affine2DBase):
    """
    A special class that does one thing, the identity transform, in a
    fast way.
    """
    # Shared, never-mutated identity matrix; Affine2D copies it on init.
    _mtx = np.identity(3)
    def frozen(self):
        # docstring inherited
        # Already immutable, so no copy is needed.
        return self
    __str__ = _make_str_method()
    def get_matrix(self):
        # docstring inherited
        return self._mtx
    def transform(self, points):
        # docstring inherited
        # Identity: return the input unchanged (as an ndarray view).
        return np.asanyarray(points)
    def transform_affine(self, points):
        # docstring inherited
        return np.asanyarray(points)
    def transform_non_affine(self, points):
        # docstring inherited
        return np.asanyarray(points)
    def transform_path(self, path):
        # docstring inherited
        return path
    def transform_path_affine(self, path):
        # docstring inherited
        return path
    def transform_path_non_affine(self, path):
        # docstring inherited
        return path
    def get_affine(self):
        # docstring inherited
        return self
    def inverted(self):
        # docstring inherited
        # The identity is its own inverse.
        return self
class CompositeGenericTransform(Transform):
    """
    A composite transform formed by applying transform *a* then
    transform *b*.
    This "generic" version can handle any two arbitrary
    transformations.
    """
    pass_through = True
    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.
        You will generally not call this constructor directly but write ``a +
        b`` instead, which will automatically choose the best kind of composite
        transform instance to create.
        """
        if a.output_dims != b.input_dims:
            raise ValueError("The output dimension of 'a' must be equal to "
                             "the input dimensions of 'b'")
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        super().__init__(**kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
    def frozen(self):
        # docstring inherited
        self._invalid = 0
        frozen = composite_transform_factory(
            self._a.frozen(), self._b.frozen())
        # The factory may return a simpler type (e.g. one child was the
        # identity); re-freeze in that case so the result is truly static.
        if not isinstance(frozen, CompositeGenericTransform):
            return frozen.frozen()
        return frozen
    def _invalidate_internal(self, value, invalidating_node):
        # In some cases for a composite transform, an invalidating call to
        # AFFINE_ONLY needs to be extended to invalidate the NON_AFFINE part
        # too. These cases are when the right hand transform is non-affine and
        # either:
        # (a) the left hand transform is non affine
        # (b) it is the left hand node which has triggered the invalidation
        if (value == Transform.INVALID_AFFINE and
                not self._b.is_affine and
                (not self._a.is_affine or invalidating_node is self._a)):
            value = Transform.INVALID
        super()._invalidate_internal(value=value,
                                     invalidating_node=invalidating_node)
    def __eq__(self, other):
        # NOTE(review): __eq__ is defined without __hash__, which makes
        # instances unhashable by default -- confirm that is intended.
        if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
            return self is other or (self._a == other._a
                                     and self._b == other._b)
        else:
            return False
    def _iter_break_from_left_to_right(self):
        for left, right in self._a._iter_break_from_left_to_right():
            yield left, right + self._b
        for left, right in self._b._iter_break_from_left_to_right():
            yield self._a + left, right
    depth = property(lambda self: self._a.depth + self._b.depth)
    is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
    is_separable = property(
        lambda self: self._a.is_separable and self._b.is_separable)
    has_inverse = property(
        lambda self: self._a.has_inverse and self._b.has_inverse)
    __str__ = _make_str_method("_a", "_b")
    def transform_affine(self, points):
        # docstring inherited
        return self.get_affine().transform(points)
    def transform_non_affine(self, points):
        # docstring inherited
        # Only the leading non-affine part of the chain is applied here;
        # any trailing affine part is handled by get_affine().
        if self._a.is_affine and self._b.is_affine:
            return points
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_non_affine(points)
        else:
            return self._b.transform_non_affine(self._a.transform(points))
    def transform_path_non_affine(self, path):
        # docstring inherited
        if self._a.is_affine and self._b.is_affine:
            return path
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_path_non_affine(path)
        else:
            return self._b.transform_path_non_affine(
                self._a.transform_path(path))
    def get_affine(self):
        # docstring inherited
        # If b is non-affine, only b's affine tail remains after
        # transform_non_affine has consumed the rest of the chain.
        if not self._b.is_affine:
            return self._b.get_affine()
        else:
            return Affine2D(np.dot(self._b.get_affine().get_matrix(),
                                   self._a.get_affine().get_matrix()))
    def inverted(self):
        # docstring inherited
        # (a then b) inverted is (b^-1 then a^-1).
        return CompositeGenericTransform(
            self._b.inverted(), self._a.inverted())
class CompositeAffine2D(Affine2DBase):
    """
    A composite transform formed by applying transform *a* then transform *b*.
    This version is an optimization that handles the case where both *a*
    and *b* are 2D affines.
    """
    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying `Affine2DBase` *a* then `Affine2DBase` *b*.
        You will generally not call this constructor directly but write ``a +
        b`` instead, which will automatically choose the best kind of composite
        transform instance to create.
        """
        if not a.is_affine or not b.is_affine:
            raise ValueError("'a' and 'b' must be affine transforms")
        if a.output_dims != b.input_dims:
            raise ValueError("The output dimension of 'a' must be equal to "
                             "the input dimensions of 'b'")
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        super().__init__(**kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
        # Cached product matrix; rebuilt lazily in get_matrix().
        self._mtx = None
    def depth(self):
        # NOTE(review): looks like a stripped @property (sibling
        # CompositeGenericTransform defines depth as a property) --
        # confirm upstream.
        return self._a.depth + self._b.depth
    def _iter_break_from_left_to_right(self):
        for left, right in self._a._iter_break_from_left_to_right():
            yield left, right + self._b
        for left, right in self._b._iter_break_from_left_to_right():
            yield self._a + left, right
    __str__ = _make_str_method("_a", "_b")
    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            # "a then b" in point space is the matrix product b @ a.
            self._mtx = np.dot(
                self._b.get_matrix(),
                self._a.get_matrix())
            self._inverted = None
            self._invalid = 0
        return self._mtx
The provided code snippet includes necessary dependencies for implementing the `composite_transform_factory` function. Write a Python function `def composite_transform_factory(a, b)` to solve the following problem:
Create a new composite transform that is the result of applying transform a then transform b. Shortcut versions of the blended transform are provided for the case where both child transforms are affine, or one or the other is the identity transform. Composite transforms may also be created using the '+' operator, e.g.:: c = a + b
Here is the function:
def composite_transform_factory(a, b):
    """
    Create a new composite transform that is the result of applying
    transform a then transform b.
    Shortcut versions of the blended transform are provided for the
    case where both child transforms are affine, or one or the other
    is the identity transform.
    Composite transforms may also be created using the '+' operator,
    e.g.::
        c = a + b
    """
    # isinstance (not equality) is deliberate: it guarantees the operand
    # *always* is an identity.  TransformWrappers are mutable, so an
    # equality test could become stale later.
    if isinstance(a, IdentityTransform):
        return b
    if isinstance(b, IdentityTransform):
        return a
    # Two concrete affines compose into a single cached matrix product.
    if isinstance(a, Affine2D) and isinstance(b, Affine2D):
        return CompositeAffine2D(a, b)
    return CompositeGenericTransform(a, b)
170,949 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
The provided code snippet includes necessary dependencies for implementing the `nonsingular` function. Write a Python function `def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True)` to solve the following problem:
Modify the endpoints of a range as needed to avoid singularities. Parameters ---------- vmin, vmax : float The initial endpoints. expander : float, default: 0.001 Fractional amount by which *vmin* and *vmax* are expanded if the original interval is too small, based on *tiny*. tiny : float, default: 1e-15 Threshold for the ratio of the interval to the maximum absolute value of its endpoints. If the interval is smaller than this, it will be expanded. This value should be around 1e-15 or larger; otherwise the interval will be approaching the double precision resolution limit. increasing : bool, default: True If True, swap *vmin*, *vmax* if *vmin* > *vmax*. Returns ------- vmin, vmax : float Endpoints, expanded and/or swapped if necessary. If either input is inf or NaN, or if both inputs are 0 or very close to zero, it returns -*expander*, *expander*.
Here is the function:
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
    """
    Modify the endpoints of a range as needed to avoid singularities.
    Parameters
    ----------
    vmin, vmax : float
        The initial endpoints.
    expander : float, default: 0.001
        Fractional amount by which *vmin* and *vmax* are expanded if
        the original interval is too small, based on *tiny*.
    tiny : float, default: 1e-15
        Threshold for the ratio of the interval to the maximum absolute
        value of its endpoints.  If the interval is smaller than
        this, it will be expanded.  This value should be around
        1e-15 or larger; otherwise the interval will be approaching
        the double precision resolution limit.
    increasing : bool, default: True
        If True, swap *vmin*, *vmax* if *vmin* > *vmax*.
    Returns
    -------
    vmin, vmax : float
        Endpoints, expanded and/or swapped if necessary.
        If either input is inf or NaN, or if both inputs are 0 or very
        close to zero, it returns -*expander*, *expander*.
    """
    # Non-finite endpoints can't be repaired; fall back to a tiny
    # symmetric interval around zero.
    if not (np.isfinite(vmin) and np.isfinite(vmax)):
        return -expander, expander

    flipped = vmax < vmin
    if flipped:
        vmin, vmax = vmax, vmin

    # Work in float: integer inputs can wrap in abs()
    # (abs(np.int8(-128)) == -128) and vmax - vmin can overflow.
    vmin = float(vmin)
    vmax = float(vmax)

    maxabsvalue = max(abs(vmin), abs(vmax))
    if maxabsvalue < (1e6 / tiny) * np.finfo(float).tiny:
        # Both endpoints are essentially zero.
        vmin, vmax = -expander, expander
    elif vmax - vmin <= maxabsvalue * tiny:
        # Degenerate (or nearly degenerate) interval: widen each end by a
        # fraction of its own magnitude.
        if vmin == 0 and vmax == 0:
            vmin, vmax = -expander, expander
        else:
            vmin = vmin - expander * abs(vmin)
            vmax = vmax + expander * abs(vmax)

    # Restore the original orientation when the caller asked for it.
    if flipped and not increasing:
        return vmax, vmin
    return vmin, vmax
170,950 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
The provided code snippet includes necessary dependencies for implementing the `interval_contains` function. Write a Python function `def interval_contains(interval, val)` to solve the following problem:
Check, inclusively, whether an interval includes a given value. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. Returns ------- bool Whether *val* is within the *interval*.
Here is the function:
def interval_contains(interval, val):
    """
    Check, inclusively, whether an interval includes a given value.
    Parameters
    ----------
    interval : (float, float)
        The endpoints of the interval.
    val : float
        Value to check is within interval.
    Returns
    -------
    bool
        Whether *val* is within the *interval*.
    """
    lower, upper = interval
    # Normalize orientation so the chained comparison below works for
    # intervals given in either order.
    if upper < lower:
        lower, upper = upper, lower
    return lower <= val <= upper
170,951 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
The provided code snippet includes necessary dependencies for implementing the `_interval_contains_close` function. Write a Python function `def _interval_contains_close(interval, val, rtol=1e-10)` to solve the following problem:
Check, inclusively, whether an interval includes a given value, with the interval expanded by a small tolerance to admit floating point errors. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. rtol : float, default: 1e-10 Relative tolerance slippage allowed outside of the interval. For an interval ``[a, b]``, values ``a - rtol * (b - a) <= val <= b + rtol * (b - a)`` are considered inside the interval. Returns ------- bool Whether *val* is within the *interval* (with tolerance).
Here is the function:
def _interval_contains_close(interval, val, rtol=1e-10):
"""
Check, inclusively, whether an interval includes a given value, with the
interval expanded by a small tolerance to admit floating point errors.
Parameters
----------
interval : (float, float)
The endpoints of the interval.
val : float
Value to check is within interval.
rtol : float, default: 1e-10
Relative tolerance slippage allowed outside of the interval.
For an interval ``[a, b]``, values
``a - rtol * (b - a) <= val <= b + rtol * (b - a)`` are considered
inside the interval.
Returns
-------
bool
Whether *val* is within the *interval* (with tolerance).
"""
a, b = interval
if a > b:
a, b = b, a
rtol = (b - a) * rtol
return a - rtol <= val <= b + rtol | Check, inclusively, whether an interval includes a given value, with the interval expanded by a small tolerance to admit floating point errors. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. rtol : float, default: 1e-10 Relative tolerance slippage allowed outside of the interval. For an interval ``[a, b]``, values ``a - rtol * (b - a) <= val <= b + rtol * (b - a)`` are considered inside the interval. Returns ------- bool Whether *val* is within the *interval* (with tolerance). |
170,952 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
The provided code snippet includes necessary dependencies for implementing the `interval_contains_open` function. Write a Python function `def interval_contains_open(interval, val)` to solve the following problem:
Check, excluding endpoints, whether an interval includes a given value. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. Returns ------- bool Whether *val* is within the *interval*.
Here is the function:
def interval_contains_open(interval, val):
    """
    Check, excluding endpoints, whether an interval includes a given value.
    Parameters
    ----------
    interval : (float, float)
        The endpoints of the interval.
    val : float
        Value to check is within interval.
    Returns
    -------
    bool
        Whether *val* is within the *interval*.
    """
    lower, upper = interval
    # Accept either orientation of the endpoints; both comparisons are
    # strict, so the endpoints themselves are excluded.
    return (lower < val < upper) or (upper < val < lower)
170,953 | import copy
import functools
import textwrap
import weakref
import math
import numpy as np
from numpy.linalg import inv
from matplotlib import _api
from matplotlib._path import (
affine_transform, count_bboxes_overlapping_bbox, update_path_extents)
from .path import Path
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix=None, **kwargs):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
super().__init__(**kwargs)
if matrix is None:
# A bit faster than np.identity(3).
matrix = IdentityTransform._mtx
self._mtx = matrix.copy()
self._invalid = 0
_base_str = _make_str_method("_mtx")
def __str__(self):
return (self._base_str()
if (self._mtx != np.diag(np.diag(self._mtx))).any()
else f"Affine2D().scale({self._mtx[0, 0]}, {self._mtx[1, 1]})"
if self._mtx[0, 0] != self._mtx[1, 1]
else f"Affine2D().scale({self._mtx[0, 0]})")
def from_values(a, b, c, d, e, f):
"""
Create a new Affine2D instance from the given values::
a c e
b d f
0 0 1
.
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], float).reshape((3, 3)))
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
if self._invalid:
self._inverted = None
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
`Affine2DBase` object.
"""
_api.check_isinstance(Affine2DBase, other=other)
self._mtx = other.get_matrix()
self.invalidate()
def identity():
"""
Return a new `Affine2D` object that is the identity transform.
Unless this transform will be mutated later on, consider using
the faster `IdentityTransform` class instead.
"""
return Affine2D()
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
# A bit faster than np.identity(3).
self._mtx = IdentityTransform._mtx.copy()
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = math.cos(theta)
b = math.sin(theta)
mtx = self._mtx
# Operating and assigning one scalar at a time is much faster.
(xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
# mtx = [[a -b 0], [b a 0], [0 0 1]] * mtx
mtx[0, 0] = a * xx - b * yx
mtx[0, 1] = a * xy - b * yy
mtx[0, 2] = a * x0 - b * y0
mtx[1, 0] = b * xx + a * yx
mtx[1, 1] = b * xy + a * yy
mtx[1, 2] = b * x0 + a * y0
self.invalidate()
return self
def rotate_deg(self, degrees):
    """
    Add a rotation (in degrees) to this transform in place.

    Returns *self*, so this method can easily be chained with more
    calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
    and :meth:`scale`.
    """
    # Thin wrapper: convert to radians and delegate to rotate().
    return self.rotate(math.radians(degrees))
def rotate_around(self, x, y, theta):
    """
    Add a rotation (in radians) around the point (x, y) in place.

    Returns *self*, so this method can easily be chained with more
    calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
    and :meth:`scale`.
    """
    # Rotate about (x, y) by conjugation: shift the pivot to the origin,
    # rotate, then shift back.
    return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
    """
    Add a rotation (in degrees) around the point (x, y) in place.

    Returns *self*, so this method can easily be chained with more
    calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
    and :meth:`scale`.
    """
    # Cast to float to avoid wraparound issues with uint8's
    x, y = float(x), float(y)
    # Same conjugation trick as rotate_around, in degrees.
    return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
    """
    Append a translation by (*tx*, *ty*) to this transform in place.

    Returns *self*, so the call can be chained with the other in-place
    helpers (:meth:`rotate`, :meth:`rotate_deg`, :meth:`scale`).
    """
    # A translation only touches the last column of the first two rows.
    self._mtx[:2, 2] += (tx, ty)
    self.invalidate()
    return self
def scale(self, sx, sy=None):
    """
    Append a scaling to this transform in place.

    If *sy* is None, the single factor *sx* is applied in both the
    *x*- and *y*-directions.

    Returns *self*, so the call can be chained with the other in-place
    helpers (:meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`).
    """
    if sy is None:
        sy = sx
    # Scale the x-row by sx and the y-row by sy; the bottom (0 0 1)
    # row is untouched, keeping the matrix affine.
    self._mtx[0] *= sx
    self._mtx[1] *= sy
    self.invalidate()
    return self
def skew(self, xShear, yShear):
    """
    Add a skew in place.

    *xShear* and *yShear* are the shear angles along the *x*- and
    *y*-axes, respectively, in radians.

    Returns *self*, so this method can easily be chained with more
    calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
    and :meth:`scale`.
    """
    # The shear factors are the tangents of the shear angles.
    rx = math.tan(xShear)
    ry = math.tan(yShear)
    mtx = self._mtx
    # Operating and assigning one scalar at a time is much faster
    # than a full matrix multiply.
    (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()
    # Left-multiply in place: mtx = [[1 rx 0], [ry 1 0], [0 0 1]] * mtx.
    mtx[0, 0] += rx * yx
    mtx[0, 1] += rx * yy
    mtx[0, 2] += rx * y0
    mtx[1, 0] += ry * xx
    mtx[1, 1] += ry * xy
    mtx[1, 2] += ry * x0
    self.invalidate()
    return self
def skew_deg(self, xShear, yShear):
    """
    Add a skew in place.

    *xShear* and *yShear* are the shear angles along the *x*- and
    *y*-axes, respectively, in degrees.

    Returns *self*, so this method can easily be chained with more
    calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
    and :meth:`scale`.
    """
    # Thin wrapper: convert both angles to radians and delegate.
    return self.skew(math.radians(xShear), math.radians(yShear))
class ScaledTranslation(Affine2DBase):
    """
    A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by *scale_trans*.
    """

    def __init__(self, xt, yt, scale_trans, **kwargs):
        super().__init__(**kwargs)
        # Untransformed offset; the scaled value is computed lazily in
        # get_matrix() so it tracks changes to scale_trans.
        self._t = (xt, yt)
        self._scale_trans = scale_trans
        # Register as a child so this transform is invalidated whenever
        # scale_trans changes.
        self.set_children(scale_trans)
        self._mtx = None  # built on first get_matrix() call
        self._inverted = None

    __str__ = _make_str_method("_t")

    def get_matrix(self):
        # docstring inherited
        if self._invalid:
            # A bit faster than np.identity(3).
            self._mtx = IdentityTransform._mtx.copy()
            # Write the scaled offset into the translation column.
            self._mtx[:2, 2] = self._scale_trans.transform(self._t)
            self._invalid = 0
            self._inverted = None
        return self._mtx
The provided code snippet includes necessary dependencies for implementing the `offset_copy` function. Write a Python function `def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches')` to solve the following problem:
Return a new transform with an added offset. Parameters ---------- trans : `Transform` subclass Any transform, to which offset will be applied. fig : `~matplotlib.figure.Figure`, default: None Current figure. It can be None if *units* are 'dots'. x, y : float, default: 0.0 The offset to apply. units : {'inches', 'points', 'dots'}, default: 'inches' Units of the offset. Returns ------- `Transform` subclass Transform with applied offset.
Here is the function:
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
    """
    Return a new transform with an added offset.

    Parameters
    ----------
    trans : `Transform` subclass
        Any transform, to which offset will be applied.
    fig : `~matplotlib.figure.Figure`, default: None
        Current figure. It can be None if *units* are 'dots'.
    x, y : float, default: 0.0
        The offset to apply.
    units : {'inches', 'points', 'dots'}, default: 'inches'
        Units of the offset.

    Returns
    -------
    `Transform` subclass
        Transform with applied offset.

    Raises
    ------
    ValueError
        If *units* is not one of the supported values, or if *fig* is
        None while *units* requires a figure.
    """
    # Validate *units* up front so an invalid value is reported as such,
    # instead of surfacing as the unrelated "fig kwarg is needed" error
    # when *fig* is also None.
    _api.check_in_list(['dots', 'points', 'inches'], units=units)
    if units == 'dots':
        # Dots are device units; no figure is needed for the conversion.
        return trans + Affine2D().translate(x, y)
    if fig is None:
        raise ValueError('For units of inches or points a fig kwarg is needed')
    if units == 'points':
        # 72 points per inch.
        x /= 72.0
        y /= 72.0
    # Defer the inches->pixels conversion to draw time via the figure's
    # dpi_scale_trans, so the offset tracks dpi changes.
    return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
170,954 | import copy
from numbers import Integral, Number, Real
import logging
import numpy as np
import matplotlib as mpl
from . import _api, cbook, colors as mcolors, _docstring
from .artist import Artist, allow_rasterization
from .cbook import (
_to_unmasked_float_array, ls_mapper, ls_mapper_r, STEP_LOOKUP_MAP)
from .markers import MarkerStyle
from .path import Path
from .transforms import Bbox, BboxTransformTo, TransformedPath
from ._enums import JoinStyle, CapStyle
from . import _path
from .markers import ( # noqa
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
# Maps short linestyle symbols to their full names. NOTE(review): this
# shadows the ls_mapper imported from .cbook above -- confirm that the
# shadowing is intentional.
ls_mapper = {'-': 'solid', '--': 'dashed', '-.': 'dashdot', ':': 'dotted'}
The provided code snippet includes necessary dependencies for implementing the `_get_dash_pattern` function. Write a Python function `def _get_dash_pattern(style)` to solve the following problem:
Convert linestyle to dash pattern.
Here is the function:
def _get_dash_pattern(style):
"""Convert linestyle to dash pattern."""
# go from short hand -> full strings
if isinstance(style, str):
style = ls_mapper.get(style, style)
# un-dashed styles
if style in ['solid', 'None']:
offset = 0
dashes = None
# dashed styles
elif style in ['dashed', 'dashdot', 'dotted']:
offset = 0
dashes = tuple(mpl.rcParams['lines.{}_pattern'.format(style)])
#
elif isinstance(style, tuple):
offset, dashes = style
if offset is None:
raise ValueError(f'Unrecognized linestyle: {style!r}')
else:
raise ValueError(f'Unrecognized linestyle: {style!r}')
# normalize offset to be positive and shorter than the dash cycle
if dashes is not None:
dsum = sum(dashes)
if dsum:
offset %= dsum
return offset, dashes | Convert linestyle to dash pattern. |
170,955 | import copy
from numbers import Integral, Number, Real
import logging
import numpy as np
import matplotlib as mpl
from . import _api, cbook, colors as mcolors, _docstring
from .artist import Artist, allow_rasterization
from .cbook import (
_to_unmasked_float_array, ls_mapper, ls_mapper_r, STEP_LOOKUP_MAP)
from .markers import MarkerStyle
from .path import Path
from .transforms import Bbox, BboxTransformTo, TransformedPath
from ._enums import JoinStyle, CapStyle
from . import _path
from .markers import ( # noqa
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
def _scale_dashes(offset, dashes, lw):
    """
    Scale a dash pattern's offset and segment lengths by linewidth *lw*.

    Honors :rc:`lines.scale_dashes`; a None *dashes* value, and None
    entries within it, pass through unscaled.
    """
    if not mpl.rcParams['lines.scale_dashes']:
        return offset, dashes
    if dashes is None:
        scaled = None
    else:
        scaled = [None if seg is None else seg * lw for seg in dashes]
    return offset * lw, scaled
170,956 | import copy
from numbers import Integral, Number, Real
import logging
import numpy as np
import matplotlib as mpl
from . import _api, cbook, colors as mcolors, _docstring
from .artist import Artist, allow_rasterization
from .cbook import (
_to_unmasked_float_array, ls_mapper, ls_mapper_r, STEP_LOOKUP_MAP)
from .markers import MarkerStyle
from .path import Path
from .transforms import Bbox, BboxTransformTo, TransformedPath
from ._enums import JoinStyle, CapStyle
from . import _path
from .markers import ( # noqa
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
The provided code snippet includes necessary dependencies for implementing the `segment_hits` function. Write a Python function `def segment_hits(cx, cy, x, y, radius)` to solve the following problem:
Return the indices of the segments in the polyline with coordinates (*cx*, *cy*) that are within a distance *radius* of the point (*x*, *y*).
Here is the function:
def segment_hits(cx, cy, x, y, radius):
    """
    Return indices of polyline segments lying within *radius* of a point.

    Parameters
    ----------
    cx, cy : float
        Coordinates of the query point.
    x, y : array
        Coordinates of the polyline vertices; segment *i* joins vertex
        *i* to vertex *i + 1*.
    radius : float
        Hit-test distance.
    """
    r2 = radius ** 2
    # A polyline of zero or one vertex has no segments: hit-test the
    # lone vertex (if any) directly.
    if len(x) <= 1:
        return np.flatnonzero((cx - x) ** 2 + (cy - y) ** 2 <= r2)

    # Segment start points and direction vectors.
    sx, sy = x[:-1], y[:-1]
    dx, dy = x[1:] - sx, y[1:] - sy
    seg_len2 = dx ** 2 + dy ** 2  # may be zero for repeated vertices

    # Parameter of the orthogonal projection of (cx, cy) onto each
    # segment's supporting line; only projections with u in [0, 1] fall
    # inside the segment itself.
    u = ((cx - sx) * dx + (cy - sy) * dy) / seg_len2
    inside = (u >= 0) & (u <= 1)

    # Vertices within radius. Near a vertex the projection test is
    # ambiguous (the point can be near both adjoining segments, or
    # neither, depending on the angle between them), so segments
    # touching a hit vertex are excluded and the vertex is reported
    # instead.
    near_vertex = (cx - x) ** 2 + (cy - y) ** 2 <= r2
    inside &= ~(near_vertex[:-1] | near_vertex[1:])

    # Distance from the point to its projection on each candidate.
    px, py = sx + u * dx, sy + u * dy
    near_segment = ((cx - px) ** 2 + (cy - py) ** 2 <= r2) & inside

    return np.concatenate((np.flatnonzero(near_vertex),
                           np.flatnonzero(near_segment)))
170,957 | import copy
from numbers import Integral, Number, Real
import logging
import numpy as np
import matplotlib as mpl
from . import _api, cbook, colors as mcolors, _docstring
from .artist import Artist, allow_rasterization
from .cbook import (
_to_unmasked_float_array, ls_mapper, ls_mapper_r, STEP_LOOKUP_MAP)
from .markers import MarkerStyle
from .path import Path
from .transforms import Bbox, BboxTransformTo, TransformedPath
from ._enums import JoinStyle, CapStyle
from . import _path
from .markers import ( # noqa
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN,
CARETLEFTBASE, CARETRIGHTBASE, CARETUPBASE, CARETDOWNBASE,
TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN)
class Real(Complex, SupportsFloat):
    # Typeshed-style stub describing the ``numbers.Real`` ABC interface;
    # all bodies are intentionally ``...``.
    # NOTE(review): decorators appear to have been stripped from this
    # excerpt -- the two ``__round__`` signatures below were presumably
    # ``@overload``s, and real/imag/conjugate are properties in the
    # stdlib ABC. Confirm against the original stub before relying on it.
    def __float__(self) -> float: ...
    def __trunc__(self) -> int: ...
    if sys.version_info >= (3, 0):
        # Python-3-only rounding protocol.
        def __floor__(self) -> int: ...
        def __ceil__(self) -> int: ...
        def __round__(self, ndigits: None = ...) -> int: ...
        def __round__(self, ndigits: int) -> Any: ...
    def __divmod__(self, other: Any) -> Any: ...
    def __rdivmod__(self, other: Any) -> Any: ...
    def __floordiv__(self, other: Any) -> int: ...
    def __rfloordiv__(self, other: Any) -> int: ...
    def __mod__(self, other: Any) -> Any: ...
    def __rmod__(self, other: Any) -> Any: ...
    def __lt__(self, other: Any) -> bool: ...
    def __le__(self, other: Any) -> bool: ...
    def __complex__(self) -> complex: ...
    def real(self) -> Any: ...
    def imag(self) -> Any: ...
    def conjugate(self) -> Any: ...
class Integral(Rational):
    # Typeshed-style stub for the ``numbers.Integral`` ABC.
    # NOTE(review): decorators (``@abstractmethod``, ``@property``) look
    # stripped from this excerpt; numerator/denominator are properties
    # in the stdlib ABC. Confirm against the original stub.
    if sys.version_info >= (3, 0):
        def __int__(self) -> int: ...
    else:
        # Python 2 used ``long`` for arbitrary-precision conversion.
        def __long__(self) -> long: ...
    def __index__(self) -> int: ...
    # ``modulus`` supports three-argument pow().
    def __pow__(self, exponent: Any, modulus: Optional[Any] = ...) -> Any: ...
    def __lshift__(self, other: Any) -> Any: ...
    def __rlshift__(self, other: Any) -> Any: ...
    def __rshift__(self, other: Any) -> Any: ...
    def __rrshift__(self, other: Any) -> Any: ...
    def __and__(self, other: Any) -> Any: ...
    def __rand__(self, other: Any) -> Any: ...
    def __xor__(self, other: Any) -> Any: ...
    def __rxor__(self, other: Any) -> Any: ...
    def __or__(self, other: Any) -> Any: ...
    def __ror__(self, other: Any) -> Any: ...
    def __invert__(self) -> Any: ...
    def __float__(self) -> float: ...
    def numerator(self) -> int: ...
    def denominator(self) -> int: ...
class Path:
"""
A series of possibly disconnected, possibly closed, line and curve
segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of path codes, or None
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices and three ``CURVE4`` codes.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not required and
ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bézier curve from the current position, with the given
control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bézier curve from the current position, with the given
control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current polyline.
If *codes* is None, it is interpreted as a ``MOVETO`` followed by a series
of ``LINETO``.
Users of Path objects should not access the vertices and codes arrays
directly. Instead, they should use `iter_segments` or `cleaned` to get the
vertex/code pairs. This helps, in particular, to consistently handle the
case of *codes* being None.
Some behavior of Path objects can be controlled by rcParams. See the
rcParams whose keys start with 'path.'.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
code_type = np.uint8
# Path codes
STOP = code_type(0) # 1 vertex
MOVETO = code_type(1) # 1 vertex
LINETO = code_type(2) # 1 vertex
CURVE3 = code_type(3) # 2 vertices
CURVE4 = code_type(4) # 3 vertices
CLOSEPOLY = code_type(79) # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
def __init__(self, vertices, codes=None, _interpolation_steps=1,
closed=False, readonly=False):
"""
Create a new path with the given vertices and codes.
Parameters
----------
vertices : (N, 2) array-like
The path vertices, as an array, masked array or sequence of pairs.
Masked values, if any, will be converted to NaNs, which are then
handled correctly by the Agg PathIterator and other consumers of
path data, such as :meth:`iter_segments`.
codes : array-like or None, optional
N-length array of integers representing the codes of the path.
If not None, codes must be the same length as vertices.
If None, *vertices* will be treated as a series of line segments.
_interpolation_steps : int, optional
Used as a hint to certain projections, such as Polar, that this
path should be linearly interpolated immediately before drawing.
This attribute is primarily an implementation detail and is not
intended for public use.
closed : bool, optional
If *codes* is None and closed is True, vertices will be treated as
line segments of a closed polygon. Note that the last vertex will
then be ignored (as the corresponding code will be set to
CLOSEPOLY).
readonly : bool, optional
Makes the path behave in an immutable way and sets the vertices
and codes as read-only arrays.
"""
vertices = _to_unmasked_float_array(vertices)
_api.check_shape((None, 2), vertices=vertices)
if codes is not None:
codes = np.asarray(codes, self.code_type)
if codes.ndim != 1 or len(codes) != len(vertices):
raise ValueError("'codes' must be a 1D list or array with the "
"same length of 'vertices'. "
f"Your vertices have shape {vertices.shape} "
f"but your codes have shape {codes.shape}")
if len(codes) and codes[0] != self.MOVETO:
raise ValueError("The first element of 'code' must be equal "
f"to 'MOVETO' ({self.MOVETO}). "
f"Your first code is {codes[0]}")
elif closed and len(vertices):
codes = np.empty(len(vertices), dtype=self.code_type)
codes[0] = self.MOVETO
codes[1:-1] = self.LINETO
codes[-1] = self.CLOSEPOLY
self._vertices = vertices
self._codes = codes
self._interpolation_steps = _interpolation_steps
self._update_values()
if readonly:
self._vertices.flags.writeable = False
if self._codes is not None:
self._codes.flags.writeable = False
self._readonly = True
else:
self._readonly = False
def _fast_from_codes_and_verts(cls, verts, codes, internals_from=None):
"""
Create a Path instance without the expense of calling the constructor.
Parameters
----------
verts : numpy array
codes : numpy array
internals_from : Path or None
If not None, another `Path` from which the attributes
``should_simplify``, ``simplify_threshold``, and
``interpolation_steps`` will be copied. Note that ``readonly`` is
never copied, and always set to ``False`` by this constructor.
"""
pth = cls.__new__(cls)
pth._vertices = _to_unmasked_float_array(verts)
pth._codes = codes
pth._readonly = False
if internals_from is not None:
pth._should_simplify = internals_from._should_simplify
pth._simplify_threshold = internals_from._simplify_threshold
pth._interpolation_steps = internals_from._interpolation_steps
else:
pth._should_simplify = True
pth._simplify_threshold = mpl.rcParams['path.simplify_threshold']
pth._interpolation_steps = 1
return pth
def _create_closed(cls, vertices):
"""
Create a closed polygonal path going through *vertices*.
Unlike ``Path(..., closed=True)``, *vertices* should **not** end with
an entry for the CLOSEPATH; this entry is added by `._create_closed`.
"""
v = _to_unmasked_float_array(vertices)
return cls(np.concatenate([v, v[:1]]), closed=True)
def _update_values(self):
self._simplify_threshold = mpl.rcParams['path.simplify_threshold']
self._should_simplify = (
self._simplify_threshold > 0 and
mpl.rcParams['path.simplify'] and
len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO))
)
def vertices(self):
"""
The list of vertices in the `Path` as an Nx2 numpy array.
"""
return self._vertices
def vertices(self, vertices):
if self._readonly:
raise AttributeError("Can't set vertices on a readonly Path")
self._vertices = vertices
self._update_values()
def codes(self):
"""
The list of codes in the `Path` as a 1D numpy array. Each
code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
or `CLOSEPOLY`. For codes that correspond to more than one
vertex (`CURVE3` and `CURVE4`), that code will be repeated so
that the length of `vertices` and `codes` is always
the same.
"""
return self._codes
def codes(self, codes):
if self._readonly:
raise AttributeError("Can't set codes on a readonly Path")
self._codes = codes
self._update_values()
def simplify_threshold(self):
"""
The fraction of a pixel difference below which vertices will
be simplified out.
"""
return self._simplify_threshold
def simplify_threshold(self, threshold):
self._simplify_threshold = threshold
def should_simplify(self):
"""
`True` if the vertices array should be simplified.
"""
return self._should_simplify
def should_simplify(self, should_simplify):
self._should_simplify = should_simplify
def readonly(self):
"""
`True` if the `Path` is read-only.
"""
return self._readonly
def copy(self):
"""
Return a shallow copy of the `Path`, which will share the
vertices and codes with the source `Path`.
"""
return copy.copy(self)
def __deepcopy__(self, memo=None):
"""
Return a deepcopy of the `Path`. The `Path` will not be
readonly, even if the source `Path` is.
"""
# Deepcopying arrays (vertices, codes) strips the writeable=False flag.
p = copy.deepcopy(super(), memo)
p._readonly = False
return p
deepcopy = __deepcopy__
def make_compound_path_from_polys(cls, XY):
"""
Make a compound `Path` object to draw a number of polygons with equal
numbers of sides.
.. plot:: gallery/misc/histogram_path.py
Parameters
----------
XY : (numpolys, numsides, 2) array
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
# the CLOSEPOLY; the vert for the closepoly is ignored but we still
# need it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
if two != 2:
raise ValueError("The third dimension of 'XY' must be 2")
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.full(nverts, cls.LINETO, dtype=cls.code_type)
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:, i]
return cls(verts, codes)
def make_compound_path(cls, *args):
"""
Make a compound path from a list of `Path` objects. Blindly removes
all `Path.STOP` control points.
"""
# Handle an empty list in args (i.e. no args).
if not args:
return Path(np.empty([0, 2], dtype=np.float32))
vertices = np.concatenate([x.vertices for x in args])
codes = np.empty(len(vertices), dtype=cls.code_type)
i = 0
for path in args:
if path.codes is None:
codes[i] = cls.MOVETO
codes[i + 1:i + len(path.vertices)] = cls.LINETO
else:
codes[i:i + len(path.codes)] = path.codes
i += len(path.vertices)
# remove STOP's, since internal STOPs are a bug
not_stop_mask = codes != cls.STOP
vertices = vertices[not_stop_mask, :]
codes = codes[not_stop_mask]
return cls(vertices, codes)
def __repr__(self):
return "Path(%r, %r)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
def iter_segments(self, transform=None, remove_nans=True, clip=None,
snap=False, stroke_width=1.0, simplify=None,
curves=True, sketch=None):
"""
Iterate over all curve segments in the path.
Each iteration returns a pair ``(vertices, code)``, where ``vertices``
is a sequence of 1-3 coordinate pairs, and ``code`` is a `Path` code.
Additionally, this method can provide a number of standard cleanups and
conversions to the path.
Parameters
----------
transform : None or :class:`~matplotlib.transforms.Transform`
If not None, the given affine transformation will be applied to the
path.
remove_nans : bool, optional
Whether to remove all NaNs from the path and skip over them using
MOVETO commands.
clip : None or (float, float, float, float), optional
If not None, must be a four-tuple (x1, y1, x2, y2)
defining a rectangle in which to clip the path.
snap : None or bool, optional
If True, snap all nodes to pixels; if False, don't snap them.
If None, snap if the path contains only segments
parallel to the x or y axes, and no more than 1024 of them.
stroke_width : float, optional
The width of the stroke being drawn (used for path snapping).
simplify : None or bool, optional
Whether to simplify the path by removing vertices
that do not affect its appearance. If None, use the
:attr:`should_simplify` attribute. See also :rc:`path.simplify`
and :rc:`path.simplify_threshold`.
curves : bool, optional
If True, curve segments will be returned as curve segments.
If False, all curves will be converted to line segments.
sketch : None or sequence, optional
If not None, must be a 3-tuple of the form
(scale, length, randomness), representing the sketch parameters.
"""
if not len(self):
return
cleaned = self.cleaned(transform=transform,
remove_nans=remove_nans, clip=clip,
snap=snap, stroke_width=stroke_width,
simplify=simplify, curves=curves,
sketch=sketch)
# Cache these object lookups for performance in the loop.
NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
STOP = self.STOP
vertices = iter(cleaned.vertices)
codes = iter(cleaned.codes)
for curr_vertices, code in zip(vertices, codes):
if code == STOP:
break
extra_vertices = NUM_VERTICES_FOR_CODE[code] - 1
if extra_vertices:
for i in range(extra_vertices):
next(codes)
curr_vertices = np.append(curr_vertices, next(vertices))
yield curr_vertices, code
def iter_bezier(self, **kwargs):
"""
Iterate over each Bézier curve (lines included) in a Path.
Parameters
----------
**kwargs
Forwarded to `.iter_segments`.
Yields
------
B : matplotlib.bezier.BezierSegment
The Bézier curves that make up the current path. Note in particular
that freestanding points are Bézier curves of order 0, and lines
are Bézier curves of order 1 (with two control points).
code : Path.code_type
The code describing what kind of curve is being returned.
Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE4 correspond to
Bézier curves with 1, 2, 3, and 4 control points (respectively).
Path.CLOSEPOLY is a Path.LINETO with the control points correctly
chosen based on the start/end points of the current stroke.
"""
first_vert = None
prev_vert = None
for verts, code in self.iter_segments(**kwargs):
if first_vert is None:
if code != Path.MOVETO:
raise ValueError("Malformed path, must start with MOVETO.")
if code == Path.MOVETO: # a point is like "CURVE1"
first_vert = verts
yield BezierSegment(np.array([first_vert])), code
elif code == Path.LINETO: # "CURVE2"
yield BezierSegment(np.array([prev_vert, verts])), code
elif code == Path.CURVE3:
yield BezierSegment(np.array([prev_vert, verts[:2],
verts[2:]])), code
elif code == Path.CURVE4:
yield BezierSegment(np.array([prev_vert, verts[:2],
verts[2:4], verts[4:]])), code
elif code == Path.CLOSEPOLY:
yield BezierSegment(np.array([prev_vert, first_vert])), code
elif code == Path.STOP:
return
else:
raise ValueError(f"Invalid Path.code_type: {code}")
prev_vert = verts[-2:]
def cleaned(self, transform=None, remove_nans=False, clip=None,
*, simplify=False, curves=False,
stroke_width=1.0, snap=False, sketch=None):
"""
Return a new Path with vertices and codes cleaned according to the
parameters.
See Also
--------
Path.iter_segments : for details of the keyword arguments.
"""
vertices, codes = _path.cleanup_path(
self, transform, remove_nans, clip, snap, stroke_width, simplify,
curves, sketch)
pth = Path._fast_from_codes_and_verts(vertices, codes, self)
if not simplify:
pth._should_simplify = False
return pth
def transformed(self, transform):
"""
Return a transformed copy of the path.
See Also
--------
matplotlib.transforms.TransformedPath
A specialized path class that will cache the transformed result and
automatically update when the transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
def contains_point(self, point, transform=None, radius=0.0):
"""
Return whether the area enclosed by the path contains the given point.
The path is always treated as closed; i.e. if the last code is not
CLOSEPOLY an implicit segment connecting the last vertex to the first
vertex is assumed.
Parameters
----------
point : (float, float)
The point (x, y) to check.
transform : `matplotlib.transforms.Transform`, optional
If not ``None``, *point* will be compared to ``self`` transformed
by *transform*; i.e. for a correct check, *transform* should
transform the path into the coordinate system of *point*.
radius : float, default: 0
Additional margin on the path in coordinates of *point*.
The path is extended tangentially by *radius/2*; i.e. if you would
draw the path with a linewidth of *radius*, all points on the line
would still be considered to be contained in the area. Conversely,
negative values shrink the area: Points on the imaginary line
will be considered outside the area.
Returns
-------
bool
Notes
-----
The current algorithm has some limitations:
- The result is undefined for points exactly at the boundary
(i.e. at the path shifted by *radius/2*).
- The result is undefined if there is no enclosed area, i.e. all
vertices are on a straight line.
- If bounding lines start to cross each other due to *radius* shift,
the result is not guaranteed to be correct.
"""
if transform is not None:
transform = transform.frozen()
# `point_in_path` does not handle nonlinear transforms, so we
# transform the path ourselves. If *transform* is affine, letting
# `point_in_path` handle the transform avoids allocating an extra
# buffer.
if transform and not transform.is_affine:
self = transform.transform_path(self)
transform = None
return _path.point_in_path(point[0], point[1], radius, self, transform)
def contains_points(self, points, transform=None, radius=0.0):
"""
Return whether the area enclosed by the path contains the given points.
The path is always treated as closed; i.e. if the last code is not
CLOSEPOLY an implicit segment connecting the last vertex to the first
vertex is assumed.
Parameters
----------
points : (N, 2) array
The points to check. Columns contain x and y values.
transform : `matplotlib.transforms.Transform`, optional
If not ``None``, *points* will be compared to ``self`` transformed
by *transform*; i.e. for a correct check, *transform* should
transform the path into the coordinate system of *points*.
radius : float, default: 0
Additional margin on the path in coordinates of *points*.
The path is extended tangentially by *radius/2*; i.e. if you would
draw the path with a linewidth of *radius*, all points on the line
would still be considered to be contained in the area. Conversely,
negative values shrink the area: Points on the imaginary line
will be considered outside the area.
Returns
-------
length-N bool array
Notes
-----
The current algorithm has some limitations:
- The result is undefined for points exactly at the boundary
(i.e. at the path shifted by *radius/2*).
- The result is undefined if there is no enclosed area, i.e. all
vertices are on a straight line.
- If bounding lines start to cross each other due to *radius* shift,
the result is not guaranteed to be correct.
"""
if transform is not None:
transform = transform.frozen()
result = _path.points_in_path(points, radius, self, transform)
return result.astype('bool')
def contains_path(self, path, transform=None):
"""
Return whether this (closed) path completely contains the given path.
If *transform* is not ``None``, the path will be transformed before
checking for containment.
"""
if transform is not None:
transform = transform.frozen()
return _path.path_in_path(self, None, path, transform)
def get_extents(self, transform=None, **kwargs):
"""
Get Bbox of the path.
Parameters
----------
transform : matplotlib.transforms.Transform, optional
Transform to apply to path before computing extents, if any.
**kwargs
Forwarded to `.iter_bezier`.
Returns
-------
matplotlib.transforms.Bbox
The extents of the path Bbox([[xmin, ymin], [xmax, ymax]])
"""
from .transforms import Bbox
if transform is not None:
self = transform.transform_path(self)
if self.codes is None:
xys = self.vertices
elif len(np.intersect1d(self.codes, [Path.CURVE3, Path.CURVE4])) == 0:
# Optimization for the straight line case.
# Instead of iterating through each curve, consider
# each line segment's end-points
# (recall that STOP and CLOSEPOLY vertices are ignored)
xys = self.vertices[np.isin(self.codes,
[Path.MOVETO, Path.LINETO])]
else:
xys = []
for curve, code in self.iter_bezier(**kwargs):
# places where the derivative is zero can be extrema
_, dzeros = curve.axis_aligned_extrema()
# as can the ends of the curve
xys.append(curve([0, *dzeros, 1]))
xys = np.concatenate(xys)
if len(xys):
return Bbox([xys.min(axis=0), xys.max(axis=0)])
else:
return Bbox.null()
def intersects_path(self, other, filled=True):
"""
Return whether if this path intersects another given path.
If *filled* is True, then this also returns True if one path completely
encloses the other (i.e., the paths are treated as filled).
"""
return _path.path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
"""
Return whether this path intersects a given `~.transforms.Bbox`.
If *filled* is True, then this also returns True if the path completely
encloses the `.Bbox` (i.e., the path is treated as filled).
The bounding box is always considered filled.
"""
return _path.path_intersects_rectangle(
self, bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)
def interpolated(self, steps):
"""
Return a new path resampled to length N x steps.
Codes other than LINETO are not handled correctly.
"""
if steps == 1:
return self
vertices = simple_linear_interpolation(self.vertices, steps)
codes = self.codes
if codes is not None:
new_codes = np.full((len(codes) - 1) * steps + 1, Path.LINETO,
dtype=self.code_type)
new_codes[0::steps] = codes
else:
new_codes = None
return Path(vertices, new_codes)
    def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
        """
        Convert this path to a list of polygons or polylines.  Each
        polygon/polyline is an Nx2 array of vertices.  In other words,
        each polygon has no ``MOVETO`` instructions or curves.  This
        is useful for displaying in backends that do not support
        compound paths or Bézier curves.

        If *width* and *height* are both non-zero then the lines will
        be simplified so that vertices outside of (0, 0), (width,
        height) will be clipped.

        If *closed_only* is `True` (default), only closed polygons,
        with the last point being the same as the first point, will be
        returned.  Any unclosed polylines in the path will be
        explicitly closed.  If *closed_only* is `False`, any unclosed
        polygons in the path will be returned as unclosed polygons,
        and the closed polygons will be returned explicitly closed by
        setting the last point to the same as the first point.
        """
        if len(self.vertices) == 0:
            return []
        if transform is not None:
            transform = transform.frozen()
        if self.codes is None and (width == 0 or height == 0):
            # Fast path: a single codes-less polyline with no clipping.
            vertices = self.vertices
            if closed_only:
                if len(vertices) < 3:
                    # Fewer than three points cannot enclose an area.
                    return []
                elif np.any(vertices[0] != vertices[-1]):
                    # Explicitly close the polygon by repeating the start.
                    vertices = [*vertices, vertices[0]]
            if transform is None:
                return [vertices]
            else:
                return [transform.transform(vertices)]
        # Deal with the case where there are curves and/or multiple
        # subpaths (using extension code)
        return _path.convert_path_to_polygons(
            self, transform, width, height, closed_only)
    # Cache for the lazily-built, shared readonly unit-rectangle path.
    _unit_rectangle = None
    def unit_rectangle(cls):
        """
        Return a `Path` instance of the unit rectangle from (0, 0) to (1, 1).
        """
        # Build once and reuse; the path is readonly so sharing is safe.
        if cls._unit_rectangle is None:
            cls._unit_rectangle = cls([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]],
                                      closed=True, readonly=True)
        return cls._unit_rectangle
    # Weak cache of small regular polygons keyed by vertex count; an entry
    # is dropped once no external reference keeps the path alive.
    _unit_regular_polygons = WeakValueDictionary()
    def unit_regular_polygon(cls, numVertices):
        """
        Return a :class:`Path` instance for a unit regular polygon with the
        given *numVertices* such that the circumscribing circle has radius 1.0,
        centered at (0, 0).
        """
        # Only small polygons (<= 16 sides) are cached.
        if numVertices <= 16:
            path = cls._unit_regular_polygons.get(numVertices)
        else:
            path = None
        if path is None:
            theta = ((2 * np.pi / numVertices) * np.arange(numVertices + 1)
                     # This initial rotation is to make sure the polygon always
                     # "points-up".
                     + np.pi / 2)
            verts = np.column_stack((np.cos(theta), np.sin(theta)))
            path = cls(verts, closed=True, readonly=True)
            if numVertices <= 16:
                cls._unit_regular_polygons[numVertices] = path
        return path
    # Weak cache of small stars keyed by (vertex count, inner radius).
    _unit_regular_stars = WeakValueDictionary()
    def unit_regular_star(cls, numVertices, innerCircle=0.5):
        """
        Return a :class:`Path` for a unit regular star with the given
        numVertices and radius of 1.0, centered at (0, 0).
        """
        # Only small stars (<= 16 points) are cached.
        if numVertices <= 16:
            path = cls._unit_regular_stars.get((numVertices, innerCircle))
        else:
            path = None
        if path is None:
            # A star with N points has 2N vertices, alternating between the
            # outer (radius 1) and inner (radius *innerCircle*) circles.
            ns2 = numVertices * 2
            theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
            # This initial rotation is to make sure the polygon always
            # "points-up"
            theta += np.pi / 2.0
            r = np.ones(ns2 + 1)
            r[1::2] = innerCircle
            verts = (r * np.vstack((np.cos(theta), np.sin(theta)))).T
            path = cls(verts, closed=True, readonly=True)
            if numVertices <= 16:
                cls._unit_regular_stars[(numVertices, innerCircle)] = path
        return path
def unit_regular_asterisk(cls, numVertices):
"""
Return a :class:`Path` for a unit regular asterisk with the given
numVertices and radius of 1.0, centered at (0, 0).
"""
return cls.unit_regular_star(numVertices, 0.0)
    # Cache for the lazily-built, shared readonly unit-circle path.
    _unit_circle = None
    def unit_circle(cls):
        """
        Return the readonly :class:`Path` of the unit circle.

        For most cases, :func:`Path.circle` will be what you want.
        """
        if cls._unit_circle is None:
            cls._unit_circle = cls.circle(center=(0, 0), radius=1,
                                          readonly=True)
        return cls._unit_circle
    def circle(cls, center=(0., 0.), radius=1., readonly=False):
        """
        Return a `Path` representing a circle of a given radius and center.

        Parameters
        ----------
        center : (float, float), default: (0, 0)
            The center of the circle.
        radius : float, default: 1
            The radius of the circle.
        readonly : bool
            Whether the created path should have the "readonly" argument
            set when creating the Path instance.

        Notes
        -----
        The circle is approximated using 8 cubic Bézier curves, as described in
        Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
        Bezier Cubic Splines <https://www.tinaja.com/glib/ellipse4.pdf>`_.
        """
        # Control-point offsets from the approximation in the reference
        # above (see Notes).
        MAGIC = 0.2652031
        SQRTHALF = np.sqrt(0.5)
        MAGIC45 = SQRTHALF * MAGIC
        # 8 cubic segments starting at the bottom of the circle (0, -1) and
        # proceeding counter-clockwise; the start point is repeated at the
        # end for the CLOSEPOLY code.
        vertices = np.array([[0.0, -1.0],
                             [MAGIC, -1.0],
                             [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
                             [SQRTHALF, -SQRTHALF],
                             [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
                             [1.0, -MAGIC],
                             [1.0, 0.0],
                             [1.0, MAGIC],
                             [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
                             [SQRTHALF, SQRTHALF],
                             [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
                             [MAGIC, 1.0],
                             [0.0, 1.0],
                             [-MAGIC, 1.0],
                             [-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
                             [-SQRTHALF, SQRTHALF],
                             [-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
                             [-1.0, MAGIC],
                             [-1.0, 0.0],
                             [-1.0, -MAGIC],
                             [-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
                             [-SQRTHALF, -SQRTHALF],
                             [-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
                             [-MAGIC, -1.0],
                             [0.0, -1.0],
                             [0.0, -1.0]],
                            dtype=float)
        codes = [cls.CURVE4] * 26
        codes[0] = cls.MOVETO
        codes[-1] = cls.CLOSEPOLY
        return Path(vertices * radius + center, codes, readonly=readonly)
    # Cache for the lazily-built readonly right half of the unit circle.
    _unit_circle_righthalf = None
    def unit_circle_righthalf(cls):
        """
        Return a `Path` of the right half of a unit circle.

        See `Path.circle` for the reference on the approximation used.
        """
        if cls._unit_circle_righthalf is None:
            # Same Bézier control-point constants as in `Path.circle`.
            MAGIC = 0.2652031
            SQRTHALF = np.sqrt(0.5)
            MAGIC45 = SQRTHALF * MAGIC
            # Four cubic segments from (0, -1) up to (0, 1), then CLOSEPOLY
            # back to the start.
            vertices = np.array(
                [[0.0, -1.0],
                 [MAGIC, -1.0],
                 [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
                 [SQRTHALF, -SQRTHALF],
                 [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
                 [1.0, -MAGIC],
                 [1.0, 0.0],
                 [1.0, MAGIC],
                 [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
                 [SQRTHALF, SQRTHALF],
                 [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
                 [MAGIC, 1.0],
                 [0.0, 1.0],
                 [0.0, -1.0]],
                float)
            codes = np.full(14, cls.CURVE4, dtype=cls.code_type)
            codes[0] = cls.MOVETO
            codes[-1] = cls.CLOSEPOLY
            cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
        return cls._unit_circle_righthalf
    def arc(cls, theta1, theta2, n=None, is_wedge=False):
        """
        Return a `Path` for the unit circle arc from angles *theta1* to
        *theta2* (in degrees).

        *theta2* is unwrapped to produce the shortest arc within 360 degrees.
        That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
        *theta2* - 360 and not a full circle plus some extra overlap.

        If *n* is provided, it is the number of spline segments to make.
        If *n* is not provided, the number of spline segments is
        determined based on the delta between *theta1* and *theta2*.

        Masionobe, L.  2003.  `Drawing an elliptical arc using
        polylines, quadratic or cubic Bezier curves
        <https://web.archive.org/web/20190318044212/http://www.spaceroots.org/documents/ellipse/index.html>`_.
        """
        halfpi = np.pi * 0.5
        # Unwrap theta2 so the sweep is at most 360 degrees.
        eta1 = theta1
        eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
        # Ensure 2pi range is not flattened to 0 due to floating-point errors,
        # but don't try to expand existing 0 range.
        if theta2 != theta1 and eta2 <= eta1:
            eta2 += 360
        eta1, eta2 = np.deg2rad([eta1, eta2])
        # number of curve segments to make
        if n is None:
            n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
        if n < 1:
            raise ValueError("n must be >= 1 or None")
        deta = (eta2 - eta1) / n
        t = np.tan(0.5 * deta)
        # Bézier control-point distance from the derivation in the reference
        # cited in the docstring.
        alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
        steps = np.linspace(eta1, eta2, n + 1, True)
        cos_eta = np.cos(steps)
        sin_eta = np.sin(steps)
        # Segment start points (A) and end points (B), with their unit
        # tangent vectors (the derivative of (cos, sin) is (-sin, cos)).
        xA = cos_eta[:-1]
        yA = sin_eta[:-1]
        xA_dot = -yA
        yA_dot = xA
        xB = cos_eta[1:]
        yB = sin_eta[1:]
        xB_dot = -yB
        yB_dot = xB
        if is_wedge:
            # Wedge: MOVETO the center (row 0, left at the zeros the array
            # was initialized with), LINETO the arc start, the arc itself,
            # then LINETO/CLOSEPOLY back to the center.
            length = n * 3 + 4
            vertices = np.zeros((length, 2), float)
            codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
            vertices[1] = [xA[0], yA[0]]
            codes[0:2] = [cls.MOVETO, cls.LINETO]
            codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
            vertex_offset = 2
            end = length - 2
        else:
            length = n * 3 + 1
            vertices = np.empty((length, 2), float)
            codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
            vertices[0] = [xA[0], yA[0]]
            codes[0] = cls.MOVETO
            vertex_offset = 1
            end = length
        # Fill the three CURVE4 rows of every segment with strided
        # assignments: first control point, second control point, end point.
        vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
        vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
        vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
        vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
        vertices[vertex_offset+2:end:3, 0] = xB
        vertices[vertex_offset+2:end:3, 1] = yB
        return cls(vertices, codes, readonly=True)
def wedge(cls, theta1, theta2, n=None):
"""
Return a `Path` for the unit circle wedge from angles *theta1* to
*theta2* (in degrees).
*theta2* is unwrapped to produce the shortest wedge within 360 degrees.
That is, if *theta2* > *theta1* + 360, the wedge will be from *theta1*
to *theta2* - 360 and not a full circle plus some extra overlap.
If *n* is provided, it is the number of spline segments to make.
If *n* is not provided, the number of spline segments is
determined based on the delta between *theta1* and *theta2*.
See `Path.arc` for the reference on the approximation used.
"""
return cls.arc(theta1, theta2, n, True)
def hatch(hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates a Path that
can be used in a repeated hatching pattern. *density* is the
number of lines per unit square.
"""
from matplotlib.hatch import get_path
return (get_path(hatchpattern, density)
if hatchpattern is not None else None)
def clip_to_bbox(self, bbox, inside=True):
"""
Clip the path to the given bounding box.
The path must be made up of one or more closed polygons. This
algorithm will not behave correctly for unclosed paths.
If *inside* is `True`, clip to the inside of the box, otherwise
to the outside of the box.
"""
verts = _path.clip_path_to_rect(self, bbox, inside)
paths = [Path(poly) for poly in verts]
return self.make_compound_path(*paths)
The provided code snippet includes the necessary dependencies for implementing the `_mark_every_path` function. Write a Python function `def _mark_every_path(markevery, tpath, affine, ax)` to solve the following problem:
Helper function that sorts out how to deal with the input `markevery` and returns the points where markers should be drawn. Takes in the `markevery` value and the line path and returns the sub-sampled path.
Here is the function:
def _mark_every_path(markevery, tpath, affine, ax):
    """
    Helper function that sorts out how to deal with the input
    `markevery` and returns the points where markers should be drawn.

    Takes in the `markevery` value and the line path and returns the
    sub-sampled path.
    """
    # pull out the two bits of data we want from the path
    codes, verts = tpath.codes, tpath.vertices
    def _slice_or_none(in_v, slc):
        """Helper function to cope with `codes` being an ndarray or `None`."""
        if in_v is None:
            return None
        return in_v[slc]
    # if just an int, assume starting at 0 and make a tuple
    if isinstance(markevery, Integral):
        markevery = (0, markevery)
    # if just a float, assume starting at 0.0 and make a tuple
    elif isinstance(markevery, Real):
        markevery = (0.0, markevery)
    if isinstance(markevery, tuple):
        if len(markevery) != 2:
            raise ValueError('`markevery` is a tuple but its len is not 2; '
                             'markevery={}'.format(markevery))
        start, step = markevery
        # if step is an int, old behavior
        if isinstance(step, Integral):
            # tuple of 2 int is for backwards compatibility,
            if not isinstance(start, Integral):
                raise ValueError(
                    '`markevery` is a tuple with len 2 and second element is '
                    'an int, but the first element is not an int; markevery={}'
                    .format(markevery))
            # just return, we are done here
            return Path(verts[slice(start, None, step)],
                        _slice_or_none(codes, slice(start, None, step)))
        elif isinstance(step, Real):
            if not isinstance(start, Real):
                raise ValueError(
                    '`markevery` is a tuple with len 2 and second element is '
                    'a float, but the first element is not a float or an int; '
                    'markevery={}'.format(markevery))
            if ax is None:
                raise ValueError(
                    "markevery is specified relative to the axes size, but "
                    "the line does not have a Axes as parent")
            # calc cumulative distance along path (in display coords):
            # non-finite vertices (NaN/inf breaks) are dropped first.
            fin = np.isfinite(verts).all(axis=1)
            fverts = verts[fin]
            disp_coords = affine.transform(fverts)
            delta = np.empty((len(disp_coords), 2))
            delta[0, :] = 0
            delta[1:, :] = disp_coords[1:, :] - disp_coords[:-1, :]
            delta = np.hypot(*delta.T).cumsum()
            # calc distance between markers along path based on the axes
            # bounding box diagonal being a distance of unity:
            (x0, y0), (x1, y1) = ax.transAxes.transform([[0, 0], [1, 1]])
            scale = np.hypot(x1 - x0, y1 - y0)
            marker_delta = np.arange(start * scale, delta[-1], step * scale)
            # find closest actual data point that is closest to
            # the theoretical distance along the path:
            inds = np.abs(delta[np.newaxis, :] - marker_delta[:, np.newaxis])
            inds = inds.argmin(axis=1)
            # de-duplicate: several theoretical positions may snap to the
            # same data point.
            inds = np.unique(inds)
            # return, we are done here
            return Path(fverts[inds], _slice_or_none(codes, inds))
        else:
            raise ValueError(
                f"markevery={markevery!r} is a tuple with len 2, but its "
                f"second element is not an int or a float")
    elif isinstance(markevery, slice):
        # mazol tov, it's already a slice, just return
        return Path(verts[markevery], _slice_or_none(codes, markevery))
    elif np.iterable(markevery):
        # fancy indexing
        try:
            return Path(verts[markevery], _slice_or_none(codes, markevery))
        except (ValueError, IndexError) as err:
            raise ValueError(
                f"markevery={markevery!r} is iterable but not a valid numpy "
                f"fancy index") from err
    else:
        raise ValueError(f"markevery={markevery!r} is not a recognized value")
170,958 | import numpy as np
from matplotlib import _api
from matplotlib.path import Path
def _validate_hatch_pattern(hatch):
valid_hatch_patterns = set(r'-+|/\xXoO.*')
if hatch is not None:
invalids = set(hatch).difference(valid_hatch_patterns)
if invalids:
valid = ''.join(sorted(valid_hatch_patterns))
invalids = ''.join(sorted(invalids))
_api.warn_deprecated(
'3.4',
removal='3.8', # one release after custom hatches (#20690)
message=f'hatch must consist of a string of "{valid}" or '
'None, but found the following invalid values '
f'"{invalids}". Passing invalid values is deprecated '
'since %(since)s and will become an error %(removal)s.'
) | null |
170,959 | import numpy as np
from matplotlib import _api
from matplotlib.path import Path
# Registry of the hatch element classes this module renders; the classes
# themselves are defined elsewhere in this module.
_hatch_types = [
    HorizontalHatch,
    VerticalHatch,
    NorthEastHatch,
    SouthEastHatch,
    SmallCircles,
    LargeCircles,
    SmallFilledCircles,
    Stars
]
class Path:
"""
A series of possibly disconnected, possibly closed, line and curve
segments.
The underlying storage is made up of two parallel numpy arrays:
- *vertices*: an Nx2 float array of vertices
- *codes*: an N-length uint8 array of path codes, or None
These two arrays always have the same length in the first
dimension. For example, to represent a cubic curve, you must
provide three vertices and three ``CURVE4`` codes.
The code types are:
- ``STOP`` : 1 vertex (ignored)
A marker for the end of the entire path (currently not required and
ignored)
- ``MOVETO`` : 1 vertex
Pick up the pen and move to the given vertex.
- ``LINETO`` : 1 vertex
Draw a line from the current position to the given vertex.
- ``CURVE3`` : 1 control point, 1 endpoint
Draw a quadratic Bézier curve from the current position, with the given
control point, to the given end point.
- ``CURVE4`` : 2 control points, 1 endpoint
Draw a cubic Bézier curve from the current position, with the given
control points, to the given end point.
- ``CLOSEPOLY`` : 1 vertex (ignored)
Draw a line segment to the start point of the current polyline.
If *codes* is None, it is interpreted as a ``MOVETO`` followed by a series
of ``LINETO``.
Users of Path objects should not access the vertices and codes arrays
directly. Instead, they should use `iter_segments` or `cleaned` to get the
vertex/code pairs. This helps, in particular, to consistently handle the
case of *codes* being None.
Some behavior of Path objects can be controlled by rcParams. See the
rcParams whose keys start with 'path.'.
.. note::
The vertices and codes arrays should be treated as
immutable -- there are a number of optimizations and assumptions
made up front in the constructor that will not change when the
data changes.
"""
code_type = np.uint8
# Path codes
STOP = code_type(0) # 1 vertex
MOVETO = code_type(1) # 1 vertex
LINETO = code_type(2) # 1 vertex
CURVE3 = code_type(3) # 2 vertices
CURVE4 = code_type(4) # 3 vertices
CLOSEPOLY = code_type(79) # 1 vertex
#: A dictionary mapping Path codes to the number of vertices that the
#: code expects.
NUM_VERTICES_FOR_CODE = {STOP: 1,
MOVETO: 1,
LINETO: 1,
CURVE3: 2,
CURVE4: 3,
CLOSEPOLY: 1}
    def __init__(self, vertices, codes=None, _interpolation_steps=1,
                 closed=False, readonly=False):
        """
        Create a new path with the given vertices and codes.

        Parameters
        ----------
        vertices : (N, 2) array-like
            The path vertices, as an array, masked array or sequence of pairs.
            Masked values, if any, will be converted to NaNs, which are then
            handled correctly by the Agg PathIterator and other consumers of
            path data, such as :meth:`iter_segments`.
        codes : array-like or None, optional
            N-length array of integers representing the codes of the path.
            If not None, codes must be the same length as vertices.
            If None, *vertices* will be treated as a series of line segments.
        _interpolation_steps : int, optional
            Used as a hint to certain projections, such as Polar, that this
            path should be linearly interpolated immediately before drawing.
            This attribute is primarily an implementation detail and is not
            intended for public use.
        closed : bool, optional
            If *codes* is None and closed is True, vertices will be treated as
            line segments of a closed polygon.  Note that the last vertex will
            then be ignored (as the corresponding code will be set to
            CLOSEPOLY).
        readonly : bool, optional
            Makes the path behave in an immutable way and sets the vertices
            and codes as read-only arrays.
        """
        vertices = _to_unmasked_float_array(vertices)
        _api.check_shape((None, 2), vertices=vertices)
        if codes is not None:
            codes = np.asarray(codes, self.code_type)
            if codes.ndim != 1 or len(codes) != len(vertices):
                raise ValueError("'codes' must be a 1D list or array with the "
                                 "same length of 'vertices'. "
                                 f"Your vertices have shape {vertices.shape} "
                                 f"but your codes have shape {codes.shape}")
            if len(codes) and codes[0] != self.MOVETO:
                raise ValueError("The first element of 'code' must be equal "
                                 f"to 'MOVETO' ({self.MOVETO}). "
                                 f"Your first code is {codes[0]}")
        elif closed and len(vertices):
            # Synthesize codes for a closed polygon:
            # MOVETO, LINETO..., CLOSEPOLY.
            codes = np.empty(len(vertices), dtype=self.code_type)
            codes[0] = self.MOVETO
            codes[1:-1] = self.LINETO
            codes[-1] = self.CLOSEPOLY
        self._vertices = vertices
        self._codes = codes
        self._interpolation_steps = _interpolation_steps
        self._update_values()
        if readonly:
            # Freeze the underlying arrays so accidental mutation raises.
            self._vertices.flags.writeable = False
            if self._codes is not None:
                self._codes.flags.writeable = False
            self._readonly = True
        else:
            self._readonly = False
    def _fast_from_codes_and_verts(cls, verts, codes, internals_from=None):
        """
        Create a Path instance without the expense of calling the constructor.

        Parameters
        ----------
        verts : numpy array
        codes : numpy array
        internals_from : Path or None
            If not None, another `Path` from which the attributes
            ``should_simplify``, ``simplify_threshold``, and
            ``interpolation_steps`` will be copied.  Note that ``readonly`` is
            never copied, and always set to ``False`` by this constructor.
        """
        # Bypass __init__ (and its validation) entirely.
        pth = cls.__new__(cls)
        pth._vertices = _to_unmasked_float_array(verts)
        pth._codes = codes
        pth._readonly = False
        if internals_from is not None:
            pth._should_simplify = internals_from._should_simplify
            pth._simplify_threshold = internals_from._simplify_threshold
            pth._interpolation_steps = internals_from._interpolation_steps
        else:
            pth._should_simplify = True
            pth._simplify_threshold = mpl.rcParams['path.simplify_threshold']
            pth._interpolation_steps = 1
        return pth
def _create_closed(cls, vertices):
"""
Create a closed polygonal path going through *vertices*.
Unlike ``Path(..., closed=True)``, *vertices* should **not** end with
an entry for the CLOSEPATH; this entry is added by `._create_closed`.
"""
v = _to_unmasked_float_array(vertices)
return cls(np.concatenate([v, v[:1]]), closed=True)
def _update_values(self):
self._simplify_threshold = mpl.rcParams['path.simplify_threshold']
self._should_simplify = (
self._simplify_threshold > 0 and
mpl.rcParams['path.simplify'] and
len(self._vertices) >= 128 and
(self._codes is None or np.all(self._codes <= Path.LINETO))
)
    # NOTE(review): the @property / @vertices.setter decorators appear to
    # have been stripped from this excerpt -- confirm against the original
    # source before relying on these definitions as written.
    def vertices(self):
        """
        The list of vertices in the `Path` as an Nx2 numpy array.
        """
        return self._vertices
    def vertices(self, vertices):
        # Setter: readonly paths reject mutation.
        if self._readonly:
            raise AttributeError("Can't set vertices on a readonly Path")
        self._vertices = vertices
        self._update_values()
    # NOTE(review): the @property / @codes.setter decorators appear to have
    # been stripped from this excerpt -- confirm against the original source.
    def codes(self):
        """
        The list of codes in the `Path` as a 1D numpy array.  Each
        code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4`
        or `CLOSEPOLY`.  For codes that correspond to more than one
        vertex (`CURVE3` and `CURVE4`), that code will be repeated so
        that the length of `vertices` and `codes` is always
        the same.
        """
        return self._codes
    def codes(self, codes):
        # Setter: readonly paths reject mutation.
        if self._readonly:
            raise AttributeError("Can't set codes on a readonly Path")
        self._codes = codes
        self._update_values()
    # NOTE(review): @property / setter decorators appear stripped here.
    def simplify_threshold(self):
        """
        The fraction of a pixel difference below which vertices will
        be simplified out.
        """
        return self._simplify_threshold
    def simplify_threshold(self, threshold):
        self._simplify_threshold = threshold
    # NOTE(review): @property / setter decorators appear stripped here.
    def should_simplify(self):
        """
        `True` if the vertices array should be simplified.
        """
        return self._should_simplify
    def should_simplify(self, should_simplify):
        self._should_simplify = should_simplify
    # NOTE(review): @property decorator appears stripped here.
    def readonly(self):
        """
        `True` if the `Path` is read-only.
        """
        return self._readonly
    def copy(self):
        """
        Return a shallow copy of the `Path`, which will share the
        vertices and codes with the source `Path`.
        """
        return copy.copy(self)
    def __deepcopy__(self, memo=None):
        """
        Return a deepcopy of the `Path`.  The `Path` will not be
        readonly, even if the source `Path` is.
        """
        # Deepcopying arrays (vertices, codes) strips the writeable=False flag.
        p = copy.deepcopy(super(), memo)
        p._readonly = False
        return p
    # Public alias so callers can spell it `path.deepcopy(...)`.
    deepcopy = __deepcopy__
def make_compound_path_from_polys(cls, XY):
"""
Make a compound `Path` object to draw a number of polygons with equal
numbers of sides.
.. plot:: gallery/misc/histogram_path.py
Parameters
----------
XY : (numpolys, numsides, 2) array
"""
# for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for
# the CLOSEPOLY; the vert for the closepoly is ignored but we still
# need it to keep the codes aligned with the vertices
numpolys, numsides, two = XY.shape
if two != 2:
raise ValueError("The third dimension of 'XY' must be 2")
stride = numsides + 1
nverts = numpolys * stride
verts = np.zeros((nverts, 2))
codes = np.full(nverts, cls.LINETO, dtype=cls.code_type)
codes[0::stride] = cls.MOVETO
codes[numsides::stride] = cls.CLOSEPOLY
for i in range(numsides):
verts[i::stride] = XY[:, i]
return cls(verts, codes)
    def make_compound_path(cls, *args):
        """
        Make a compound path from a list of `Path` objects.  Blindly removes
        all `Path.STOP` control points.
        """
        # Handle an empty list in args (i.e. no args).
        if not args:
            return Path(np.empty([0, 2], dtype=np.float32))
        vertices = np.concatenate([x.vertices for x in args])
        codes = np.empty(len(vertices), dtype=cls.code_type)
        i = 0
        for path in args:
            if path.codes is None:
                # A codes-less path is an implicit MOVETO followed by LINETOs.
                codes[i] = cls.MOVETO
                codes[i + 1:i + len(path.vertices)] = cls.LINETO
            else:
                codes[i:i + len(path.codes)] = path.codes
            i += len(path.vertices)
        # remove STOP's, since internal STOPs are a bug
        not_stop_mask = codes != cls.STOP
        vertices = vertices[not_stop_mask, :]
        codes = codes[not_stop_mask]
        return cls(vertices, codes)
def __repr__(self):
return "Path(%r, %r)" % (self.vertices, self.codes)
def __len__(self):
return len(self.vertices)
    def iter_segments(self, transform=None, remove_nans=True, clip=None,
                      snap=False, stroke_width=1.0, simplify=None,
                      curves=True, sketch=None):
        """
        Iterate over all curve segments in the path.

        Each iteration returns a pair ``(vertices, code)``, where ``vertices``
        is a sequence of 1-3 coordinate pairs, and ``code`` is a `Path` code.

        Additionally, this method can provide a number of standard cleanups and
        conversions to the path.

        Parameters
        ----------
        transform : None or :class:`~matplotlib.transforms.Transform`
            If not None, the given affine transformation will be applied to the
            path.
        remove_nans : bool, optional
            Whether to remove all NaNs from the path and skip over them using
            MOVETO commands.
        clip : None or (float, float, float, float), optional
            If not None, must be a four-tuple (x1, y1, x2, y2)
            defining a rectangle in which to clip the path.
        snap : None or bool, optional
            If True, snap all nodes to pixels; if False, don't snap them.
            If None, snap if the path contains only segments
            parallel to the x or y axes, and no more than 1024 of them.
        stroke_width : float, optional
            The width of the stroke being drawn (used for path snapping).
        simplify : None or bool, optional
            Whether to simplify the path by removing vertices
            that do not affect its appearance.  If None, use the
            :attr:`should_simplify` attribute.  See also :rc:`path.simplify`
            and :rc:`path.simplify_threshold`.
        curves : bool, optional
            If True, curve segments will be returned as curve segments.
            If False, all curves will be converted to line segments.
        sketch : None or sequence, optional
            If not None, must be a 3-tuple of the form
            (scale, length, randomness), representing the sketch parameters.
        """
        if not len(self):
            return
        cleaned = self.cleaned(transform=transform,
                               remove_nans=remove_nans, clip=clip,
                               snap=snap, stroke_width=stroke_width,
                               simplify=simplify, curves=curves,
                               sketch=sketch)
        # Cache these object lookups for performance in the loop.
        NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE
        STOP = self.STOP
        vertices = iter(cleaned.vertices)
        codes = iter(cleaned.codes)
        for curr_vertices, code in zip(vertices, codes):
            if code == STOP:
                break
            # Multi-vertex codes (CURVE3/CURVE4) repeat the code once per
            # vertex; consume the extra rows and fold them into one segment.
            extra_vertices = NUM_VERTICES_FOR_CODE[code] - 1
            if extra_vertices:
                for i in range(extra_vertices):
                    next(codes)
                    curr_vertices = np.append(curr_vertices, next(vertices))
            yield curr_vertices, code
    def iter_bezier(self, **kwargs):
        """
        Iterate over each Bézier curve (lines included) in a Path.

        Parameters
        ----------
        **kwargs
            Forwarded to `.iter_segments`.

        Yields
        ------
        B : matplotlib.bezier.BezierSegment
            The Bézier curves that make up the current path.  Note in
            particular that freestanding points are Bézier curves of order 0,
            and lines are Bézier curves of order 1 (with two control points).
        code : Path.code_type
            The code describing what kind of curve is being returned.
            Path.MOVETO, Path.LINETO, Path.CURVE3, Path.CURVE4 correspond to
            Bézier curves with 1, 2, 3, and 4 control points (respectively).
            Path.CLOSEPOLY is a Path.LINETO with the control points correctly
            chosen based on the start/end points of the current stroke.
        """
        # first_vert: start of the current stroke (needed for CLOSEPOLY);
        # prev_vert: end point of the previously yielded segment.
        first_vert = None
        prev_vert = None
        for verts, code in self.iter_segments(**kwargs):
            if first_vert is None:
                if code != Path.MOVETO:
                    raise ValueError("Malformed path, must start with MOVETO.")
            if code == Path.MOVETO:  # a point is like "CURVE1"
                first_vert = verts
                yield BezierSegment(np.array([first_vert])), code
            elif code == Path.LINETO:  # "CURVE2"
                yield BezierSegment(np.array([prev_vert, verts])), code
            elif code == Path.CURVE3:
                yield BezierSegment(np.array([prev_vert, verts[:2],
                                              verts[2:]])), code
            elif code == Path.CURVE4:
                yield BezierSegment(np.array([prev_vert, verts[:2],
                                              verts[2:4], verts[4:]])), code
            elif code == Path.CLOSEPOLY:
                # Close the stroke back to its starting vertex.
                yield BezierSegment(np.array([prev_vert, first_vert])), code
            elif code == Path.STOP:
                return
            else:
                raise ValueError(f"Invalid Path.code_type: {code}")
            # The last two values of `verts` are always the segment end point.
            prev_vert = verts[-2:]
    def cleaned(self, transform=None, remove_nans=False, clip=None,
                *, simplify=False, curves=False,
                stroke_width=1.0, snap=False, sketch=None):
        """
        Return a new Path with vertices and codes cleaned according to the
        parameters.

        See Also
        --------
        Path.iter_segments : for details of the keyword arguments.
        """
        vertices, codes = _path.cleanup_path(
            self, transform, remove_nans, clip, snap, stroke_width, simplify,
            curves, sketch)
        pth = Path._fast_from_codes_and_verts(vertices, codes, self)
        if not simplify:
            # Simplification was explicitly skipped; don't let downstream
            # consumers simplify the result either.
            pth._should_simplify = False
        return pth
def transformed(self, transform):
"""
Return a transformed copy of the path.
See Also
--------
matplotlib.transforms.TransformedPath
A specialized path class that will cache the transformed result and
automatically update when the transform changes.
"""
return Path(transform.transform(self.vertices), self.codes,
self._interpolation_steps)
    def contains_point(self, point, transform=None, radius=0.0):
        """
        Return whether the area enclosed by the path contains the given point.

        The path is always treated as closed; i.e. if the last code is not
        CLOSEPOLY an implicit segment connecting the last vertex to the first
        vertex is assumed.

        Parameters
        ----------
        point : (float, float)
            The point (x, y) to check.
        transform : `matplotlib.transforms.Transform`, optional
            If not ``None``, *point* will be compared to ``self`` transformed
            by *transform*; i.e. for a correct check, *transform* should
            transform the path into the coordinate system of *point*.
        radius : float, default: 0
            Additional margin on the path in coordinates of *point*.
            The path is extended tangentially by *radius/2*; i.e. if you would
            draw the path with a linewidth of *radius*, all points on the line
            would still be considered to be contained in the area.  Conversely,
            negative values shrink the area: Points on the imaginary line
            will be considered outside the area.

        Returns
        -------
        bool

        Notes
        -----
        The current algorithm has some limitations:

        - The result is undefined for points exactly at the boundary
          (i.e. at the path shifted by *radius/2*).
        - The result is undefined if there is no enclosed area, i.e. all
          vertices are on a straight line.
        - If bounding lines start to cross each other due to *radius* shift,
          the result is not guaranteed to be correct.
        """
        if transform is not None:
            transform = transform.frozen()
        # `point_in_path` does not handle nonlinear transforms, so we
        # transform the path ourselves.  If *transform* is affine, letting
        # `point_in_path` handle the transform avoids allocating an extra
        # buffer.
        if transform and not transform.is_affine:
            self = transform.transform_path(self)
            transform = None
        return _path.point_in_path(point[0], point[1], radius, self, transform)
def contains_points(self, points, transform=None, radius=0.0):
    """
    Return whether the area enclosed by the path contains the given points.

    The path is always treated as closed: if the last code is not
    CLOSEPOLY, an implicit segment connecting the last vertex to the
    first vertex is assumed.

    Parameters
    ----------
    points : (N, 2) array
        The points to check; columns contain x and y values.
    transform : `matplotlib.transforms.Transform`, optional
        If not ``None``, *points* are compared against ``self``
        transformed by *transform*; i.e. *transform* should map the path
        into the coordinate system of *points*.
    radius : float, default: 0
        Additional margin on the path in coordinates of *points*.  The
        path is extended tangentially by *radius/2*; negative values
        shrink the area instead.

    Returns
    -------
    length-N bool array

    Notes
    -----
    The result is undefined for points exactly on the (radius-shifted)
    boundary, for degenerate paths whose vertices are collinear, and when
    a *radius* shift makes the bounding lines cross each other.
    """
    trans = transform.frozen() if transform is not None else None
    inside = _path.points_in_path(points, radius, self, trans)
    return inside.astype('bool')
def contains_path(self, path, transform=None):
    """
    Return whether this (closed) path completely contains the given path.

    If *transform* is not ``None``, the path is transformed before the
    containment check.
    """
    frozen = None if transform is None else transform.frozen()
    return _path.path_in_path(self, None, path, frozen)
def get_extents(self, transform=None, **kwargs):
    """
    Get Bbox of the path.

    Parameters
    ----------
    transform : matplotlib.transforms.Transform, optional
        Transform to apply to path before computing extents, if any.
    **kwargs
        Forwarded to `.iter_bezier`.

    Returns
    -------
    matplotlib.transforms.Bbox
        The extents of the path Bbox([[xmin, ymin], [xmax, ymax]])
    """
    from .transforms import Bbox
    if transform is not None:
        self = transform.transform_path(self)
    if self.codes is None:
        # No codes: every vertex lies on the path, so the extents are the
        # componentwise min/max of the vertex array.
        xys = self.vertices
    elif len(np.intersect1d(self.codes, [Path.CURVE3, Path.CURVE4])) == 0:
        # Optimization for the straight line case.
        # Instead of iterating through each curve, consider
        # each line segment's end-points
        # (recall that STOP and CLOSEPOLY vertices are ignored)
        xys = self.vertices[np.isin(self.codes,
                                    [Path.MOVETO, Path.LINETO])]
    else:
        # General case: walk every Bezier segment, evaluating it at its
        # interior derivative zeros and at both endpoints.
        xys = []
        for curve, code in self.iter_bezier(**kwargs):
            # places where the derivative is zero can be extrema
            _, dzeros = curve.axis_aligned_extrema()
            # as can the ends of the curve
            xys.append(curve([0, *dzeros, 1]))
        xys = np.concatenate(xys)
    if len(xys):
        return Bbox([xys.min(axis=0), xys.max(axis=0)])
    else:
        # Empty path: return a null bbox rather than raising on min/max.
        return Bbox.null()
def intersects_path(self, other, filled=True):
    """
    Return whether this path intersects another given path.

    If *filled* is True, this also returns True when one path completely
    encloses the other (i.e., the paths are treated as filled).
    """
    return _path.path_intersects_path(self, other, filled)
def intersects_bbox(self, bbox, filled=True):
    """
    Return whether this path intersects a given `~.transforms.Bbox`.

    If *filled* is True, this also returns True when the path completely
    encloses the `.Bbox` (i.e., the path is treated as filled).  The
    bounding box itself is always considered filled.
    """
    return _path.path_intersects_rectangle(
        self, bbox.x0, bbox.y0, bbox.x1, bbox.y1, filled)
def interpolated(self, steps):
    """
    Return a new path resampled to length N x steps.

    Codes other than LINETO are not handled correctly.
    """
    if steps == 1:
        return self
    verts = simple_linear_interpolation(self.vertices, steps)
    if self.codes is None:
        codes = None
    else:
        # Every interpolated vertex is a LINETO except the originals,
        # which keep their codes at stride *steps*.
        codes = np.full((len(self.codes) - 1) * steps + 1, Path.LINETO,
                        dtype=self.code_type)
        codes[0::steps] = self.codes
    return Path(verts, codes)
def to_polygons(self, transform=None, width=0, height=0, closed_only=True):
    """
    Convert this path to a list of polygons or polylines.  Each
    polygon/polyline is an Nx2 array of vertices.  In other words,
    each polygon has no ``MOVETO`` instructions or curves.  This
    is useful for displaying in backends that do not support
    compound paths or Bézier curves.

    If *width* and *height* are both non-zero then the lines will
    be simplified so that vertices outside of (0, 0), (width,
    height) will be clipped.

    If *closed_only* is `True` (default), only closed polygons,
    with the last point being the same as the first point, will be
    returned.  Any unclosed polylines in the path will be
    explicitly closed.  If *closed_only* is `False`, any unclosed
    polygons in the path will be returned as unclosed polygons,
    and the closed polygons will be returned explicitly closed by
    setting the last point to the same as the first point.
    """
    if len(self.vertices) == 0:
        return []
    if transform is not None:
        transform = transform.frozen()
    if self.codes is None and (width == 0 or height == 0):
        # Fast path: a single curve-free polyline and no clipping
        # requested; handle it in pure Python.
        vertices = self.vertices
        if closed_only:
            if len(vertices) < 3:
                # Fewer than 3 points cannot enclose an area.
                return []
            elif np.any(vertices[0] != vertices[-1]):
                # Close the polygon by repeating the first point.
                vertices = [*vertices, vertices[0]]
        if transform is None:
            return [vertices]
        else:
            return [transform.transform(vertices)]
    # Deal with the case where there are curves and/or multiple
    # subpaths (using extension code)
    return _path.convert_path_to_polygons(
        self, transform, width, height, closed_only)
_unit_rectangle = None

def unit_rectangle(cls):
    """
    Return a `Path` instance of the unit rectangle from (0, 0) to (1, 1).
    """
    # Built lazily and cached on the class; the path is readonly so a
    # single shared instance is safe.
    if cls._unit_rectangle is None:
        corners = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
        cls._unit_rectangle = cls(corners, closed=True, readonly=True)
    return cls._unit_rectangle
_unit_regular_polygons = WeakValueDictionary()

def unit_regular_polygon(cls, numVertices):
    """
    Return a :class:`Path` instance for a unit regular polygon with the
    given *numVertices* such that the circumscribing circle has radius 1.0,
    centered at (0, 0).
    """
    # Small polygons are cached (weakly, so unused entries can be freed).
    if numVertices <= 16:
        path = cls._unit_regular_polygons.get(numVertices)
    else:
        path = None
    if path is None:
        theta = ((2 * np.pi / numVertices) * np.arange(numVertices + 1)
                 # This initial rotation is to make sure the polygon always
                 # "points-up".
                 + np.pi / 2)
        verts = np.column_stack((np.cos(theta), np.sin(theta)))
        path = cls(verts, closed=True, readonly=True)
        if numVertices <= 16:
            cls._unit_regular_polygons[numVertices] = path
    return path
_unit_regular_stars = WeakValueDictionary()

def unit_regular_star(cls, numVertices, innerCircle=0.5):
    """
    Return a :class:`Path` for a unit regular star with the given
    numVertices and radius of 1.0, centered at (0, 0).
    """
    # Small stars are cached weakly, keyed by (vertex count, inner radius).
    if numVertices <= 16:
        path = cls._unit_regular_stars.get((numVertices, innerCircle))
    else:
        path = None
    if path is None:
        ns2 = numVertices * 2  # alternating outer and inner points
        theta = (2*np.pi/ns2 * np.arange(ns2 + 1))
        # This initial rotation is to make sure the polygon always
        # "points-up"
        theta += np.pi / 2.0
        r = np.ones(ns2 + 1)
        # Every other vertex sits on the inner circle.
        r[1::2] = innerCircle
        verts = (r * np.vstack((np.cos(theta), np.sin(theta)))).T
        path = cls(verts, closed=True, readonly=True)
        if numVertices <= 16:
            cls._unit_regular_stars[(numVertices, innerCircle)] = path
    return path
def unit_regular_asterisk(cls, numVertices):
    """
    Return a :class:`Path` for a unit regular asterisk with the given
    numVertices and radius of 1.0, centered at (0, 0).
    """
    # An asterisk is a degenerate star whose inner radius is zero.
    return cls.unit_regular_star(numVertices, innerCircle=0.0)
_unit_circle = None

def unit_circle(cls):
    """
    Return the readonly :class:`Path` of the unit circle.

    For most cases, :func:`Path.circle` will be what you want.
    """
    # Built lazily and cached on the class; readonly, so safely shared.
    if cls._unit_circle is None:
        cls._unit_circle = cls.circle(center=(0, 0), radius=1,
                                      readonly=True)
    return cls._unit_circle
def circle(cls, center=(0., 0.), radius=1., readonly=False):
    """
    Return a `Path` representing a circle of a given radius and center.

    Parameters
    ----------
    center : (float, float), default: (0, 0)
        The center of the circle.
    radius : float, default: 1
        The radius of the circle.
    readonly : bool
        Whether the created path should have the "readonly" argument
        set when creating the Path instance.

    Notes
    -----
    The circle is approximated using 8 cubic Bézier curves, as described in
    Lancaster, Don.  `Approximating a Circle or an Ellipse Using Four
    Bezier Cubic Splines <https://www.tinaja.com/glib/ellipse4.pdf>`_.
    """
    # MAGIC is the tangential control-point offset for each eighth-circle
    # Bézier segment; MAGIC45 is its component along the 45° diagonals.
    MAGIC = 0.2652031
    SQRTHALF = np.sqrt(0.5)
    MAGIC45 = SQRTHALF * MAGIC
    # On-curve points every 45°, starting at (0, -1) and going
    # counterclockwise, with two control points between each pair; the
    # final duplicated vertex is the CLOSEPOLY endpoint.
    vertices = np.array([[0.0, -1.0],
                         [MAGIC, -1.0],
                         [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
                         [SQRTHALF, -SQRTHALF],
                         [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
                         [1.0, -MAGIC],
                         [1.0, 0.0],
                         [1.0, MAGIC],
                         [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
                         [SQRTHALF, SQRTHALF],
                         [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
                         [MAGIC, 1.0],
                         [0.0, 1.0],
                         [-MAGIC, 1.0],
                         [-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45],
                         [-SQRTHALF, SQRTHALF],
                         [-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45],
                         [-1.0, MAGIC],
                         [-1.0, 0.0],
                         [-1.0, -MAGIC],
                         [-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45],
                         [-SQRTHALF, -SQRTHALF],
                         [-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45],
                         [-MAGIC, -1.0],
                         [0.0, -1.0],
                         [0.0, -1.0]],
                        dtype=float)
    codes = [cls.CURVE4] * 26
    codes[0] = cls.MOVETO
    codes[-1] = cls.CLOSEPOLY
    # Scale and translate the unit circle into place.
    return Path(vertices * radius + center, codes, readonly=readonly)
_unit_circle_righthalf = None

def unit_circle_righthalf(cls):
    """
    Return a `Path` of the right half of a unit circle.

    See `Path.circle` for the reference on the approximation used.
    """
    # Built lazily and cached on the class; uses the same Bézier
    # control-point constants as `Path.circle`, covering the arc from
    # (0, -1) up to (0, 1) through (1, 0).
    if cls._unit_circle_righthalf is None:
        MAGIC = 0.2652031
        SQRTHALF = np.sqrt(0.5)
        MAGIC45 = SQRTHALF * MAGIC
        vertices = np.array(
            [[0.0, -1.0],
             [MAGIC, -1.0],
             [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45],
             [SQRTHALF, -SQRTHALF],
             [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45],
             [1.0, -MAGIC],
             [1.0, 0.0],
             [1.0, MAGIC],
             [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45],
             [SQRTHALF, SQRTHALF],
             [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45],
             [MAGIC, 1.0],
             [0.0, 1.0],
             [0.0, -1.0]],
            float)
        codes = np.full(14, cls.CURVE4, dtype=cls.code_type)
        codes[0] = cls.MOVETO
        codes[-1] = cls.CLOSEPOLY
        cls._unit_circle_righthalf = cls(vertices, codes, readonly=True)
    return cls._unit_circle_righthalf
def arc(cls, theta1, theta2, n=None, is_wedge=False):
    """
    Return a `Path` for the unit circle arc from angles *theta1* to
    *theta2* (in degrees).

    *theta2* is unwrapped to produce the shortest arc within 360 degrees.
    That is, if *theta2* > *theta1* + 360, the arc will be from *theta1* to
    *theta2* - 360 and not a full circle plus some extra overlap.

    If *n* is provided, it is the number of spline segments to make.
    If *n* is not provided, the number of spline segments is
    determined based on the delta between *theta1* and *theta2*.

    Masionobe, L.  2003.  `Drawing an elliptical arc using
    polylines, quadratic or cubic Bezier curves
    <https://web.archive.org/web/20190318044212/http://www.spaceroots.org/documents/ellipse/index.html>`_.
    """
    halfpi = np.pi * 0.5
    eta1 = theta1
    # Unwrap theta2 into (theta1, theta1 + 360].
    eta2 = theta2 - 360 * np.floor((theta2 - theta1) / 360)
    # Ensure 2pi range is not flattened to 0 due to floating-point errors,
    # but don't try to expand existing 0 range.
    if theta2 != theta1 and eta2 <= eta1:
        eta2 += 360
    eta1, eta2 = np.deg2rad([eta1, eta2])
    # number of curve segments to make
    if n is None:
        # Power of two such that each segment spans at most pi/2.
        n = int(2 ** np.ceil((eta2 - eta1) / halfpi))
    if n < 1:
        raise ValueError("n must be >= 1 or None")
    deta = (eta2 - eta1) / n
    t = np.tan(0.5 * deta)
    # Control-point distance along the tangent for a cubic Bézier
    # approximating a circular arc of angle deta (Maisonobe's formula).
    alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0
    steps = np.linspace(eta1, eta2, n + 1, True)
    cos_eta = np.cos(steps)
    sin_eta = np.sin(steps)
    # Segment start points A and end points B, with tangents obtained from
    # the derivative of (cos, sin) = (-sin, cos).
    xA = cos_eta[:-1]
    yA = sin_eta[:-1]
    xA_dot = -yA
    yA_dot = xA
    xB = cos_eta[1:]
    yB = sin_eta[1:]
    xB_dot = -yB
    yB_dot = xB
    if is_wedge:
        # A wedge adds the center point: MOVETO center, LINETO arc start,
        # the arc itself, then LINETO + CLOSEPOLY back to the center.
        length = n * 3 + 4
        vertices = np.zeros((length, 2), float)
        codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
        vertices[1] = [xA[0], yA[0]]
        codes[0:2] = [cls.MOVETO, cls.LINETO]
        codes[-2:] = [cls.LINETO, cls.CLOSEPOLY]
        vertex_offset = 2
        end = length - 2
    else:
        length = n * 3 + 1
        vertices = np.empty((length, 2), float)
        codes = np.full(length, cls.CURVE4, dtype=cls.code_type)
        vertices[0] = [xA[0], yA[0]]
        codes[0] = cls.MOVETO
        vertex_offset = 1
        end = length
    # Each CURVE4 segment consumes three vertices: two control points
    # followed by the segment end point.
    vertices[vertex_offset:end:3, 0] = xA + alpha * xA_dot
    vertices[vertex_offset:end:3, 1] = yA + alpha * yA_dot
    vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot
    vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot
    vertices[vertex_offset+2:end:3, 0] = xB
    vertices[vertex_offset+2:end:3, 1] = yB
    return cls(vertices, codes, readonly=True)
def wedge(cls, theta1, theta2, n=None):
    """
    Return a `Path` for the unit circle wedge from angles *theta1* to
    *theta2* (in degrees).

    *theta2* is unwrapped to produce the shortest wedge within 360
    degrees; if *theta2* > *theta1* + 360, the wedge runs from *theta1*
    to *theta2* - 360 rather than overlapping a full circle.

    If *n* is provided, it is the number of spline segments to make;
    otherwise it is chosen from the delta between *theta1* and *theta2*.

    See `Path.arc` for the reference on the approximation used.
    """
    # A wedge is just an arc with the center point and closing edges added.
    return cls.arc(theta1, theta2, n, is_wedge=True)
def hatch(hatchpattern, density=6):
    """
    Given a hatch specifier, *hatchpattern*, generates a Path that
    can be used in a repeated hatching pattern.  *density* is the
    number of lines per unit square.
    """
    # Imported here to avoid a circular import at module load time.
    from matplotlib.hatch import get_path
    if hatchpattern is None:
        return None
    return get_path(hatchpattern, density)
def clip_to_bbox(self, bbox, inside=True):
    """
    Clip the path to the given bounding box.

    The path must be made up of one or more closed polygons.  This
    algorithm will not behave correctly for unclosed paths.

    If *inside* is `True`, clip to the inside of the box, otherwise
    to the outside of the box.
    """
    polys = _path.clip_path_to_rect(self, bbox, inside)
    return self.make_compound_path(*map(Path, polys))
The provided code snippet includes necessary dependencies for implementing the `get_path` function. Write a Python function `def get_path(hatchpattern, density=6)` to solve the following problem:
Given a hatch specifier, *hatchpattern*, generates a Path to render the hatch in a unit square. *density* is the number of lines per unit square.
Here is the function:
def get_path(hatchpattern, density=6):
    """
    Given a hatch specifier, *hatchpattern*, generates Path to render
    the hatch in a unit square.  *density* is the number of lines per
    unit square.
    """
    density = int(density)
    # Instantiate every known hatch type against the specifier; types
    # that don't match report zero vertices.
    patterns = [hatch_type(hatchpattern, density)
                for hatch_type in _hatch_types]
    total = sum(pattern.num_vertices for pattern in patterns)
    if total == 0:
        return Path(np.empty((0, 2)))
    # Pre-allocate one combined vertex/code buffer and let each pattern
    # fill in its own slice.
    vertices = np.empty((total, 2))
    codes = np.empty(total, Path.code_type)
    offset = 0
    for pattern in patterns:
        if not pattern.num_vertices:
            continue
        end = offset + pattern.num_vertices
        pattern.set_vertices_and_codes(vertices[offset:end],
                                       codes[offset:end])
        offset = end
    return Path(vertices, codes)
170,960 | import binascii
import functools
import logging
import re
import string
import struct
import numpy as np
from matplotlib.cbook import _format_approx
from . import _api
class _NameToken(_Token):
    """A PostScript name token, e.g. ``/FontName``."""
    kind = 'name'

    def is_slash_name(self):
        # Literal names carry a leading slash; executable names do not.
        return self.raw.startswith('/')

    def value(self):
        # Drop the leading character (the '/' of a literal name).
        # NOTE(review): assumes a slash name; for a non-slash name this
        # would strip the name's first character — confirm with callers.
        return self.raw[1:]
class _BooleanToken(_Token):
    """A PostScript boolean literal (``true`` or ``false``)."""
    kind = 'boolean'

    def value(self):
        # Only the exact keyword 'true' is truthy.
        return self.raw == 'true'
class _KeywordToken(_Token):
    """An executable PostScript name (operator or procedure name)."""
    kind = 'keyword'

    def is_keyword(self, *names):
        # True if this keyword matches any of the given names.
        return self.raw in names
class _DelimiterToken(_Token):
    """A structural delimiter: one of ``[ ] { } << >>``."""
    kind = 'delimiter'

    def is_delim(self):
        return True

    def opposite(self):
        # Matching partner of this delimiter; raises KeyError for any
        # other raw text.
        return {'[': ']', ']': '[',
                '{': '}', '}': '{',
                '<<': '>>', '>>': '<<'
                }[self.raw]
class _WhitespaceToken(_Token):
    """A run of whitespace or a %-comment (dropped when skip_ws is set)."""
    kind = 'whitespace'
class _StringToken(_Token):
    """A PostScript string: parenthesized ``(...)`` or hex ``<...>``."""
    kind = 'string'

    # Backslash escapes recognized inside (...) strings: the named escapes
    # and 1-3 digit octal character codes.
    _escapes_re = re.compile(r'\\([\\()nrtbf]|[0-7]{1,3})')
    _replacements = {'\\': '\\', '(': '(', ')': ')', 'n': '\n',
                     'r': '\r', 't': '\t', 'b': '\b', 'f': '\f'}
    # Whitespace characters that are allowed (and ignored) in hex strings.
    _ws_re = re.compile('[\0\t\r\f\n ]')

    # NOTE(review): takes *cls* — presumably a @classmethod whose decorator
    # was lost in extraction; confirm against the original source.
    def _escape(cls, match):
        group = match.group(1)
        try:
            return cls._replacements[group]
        except KeyError:
            # Not a named escape: interpret as an octal character code.
            return chr(int(group, 8))

    def value(self):
        if self.raw[0] == '(':
            # Literal string: strip the parens and expand escapes.
            return self._escapes_re.sub(self._escape, self.raw[1:-1])
        else:
            # Hex string: drop embedded whitespace, pad odd-length data
            # with a trailing '0', and decode to bytes.
            data = self._ws_re.sub('', self.raw[1:-1])
            if len(data) % 2 == 1:
                data += '0'
            return binascii.unhexlify(data)
class _BinaryToken(_Token):
    """Raw binary data whose length was requested by the consumer."""
    kind = 'binary'

    def value(self):
        # Drops the first byte of the raw data — presumably a separator
        # preceding the binary payload; confirm with callers.
        return self.raw[1:]
class _NumberToken(_Token):
    """A numeric literal (classified by the tokenizer via ``float()``)."""
    kind = 'number'

    def is_number(self):
        return True

    def value(self):
        # Tokens without a '.' are usually integers, but the tokenizer
        # classifies anything float() accepts as a number, so dotless
        # exponent forms such as "1e5" must fall back to float parsing
        # instead of raising ValueError from int().
        if '.' not in self.raw:
            try:
                return int(self.raw)
            except ValueError:
                return float(self.raw)
        else:
            return float(self.raw)
The provided code snippet includes necessary dependencies for implementing the `_tokenize` function. Write a Python function `def _tokenize(data: bytes, skip_ws: bool)` to solve the following problem:
A generator that produces _Token instances from Type-1 font code. The consumer of the generator may send an integer to the tokenizer to indicate that the next token should be _BinaryToken of the given length. Parameters ---------- data : bytes The data of the font to tokenize. skip_ws : bool If true, the generator will drop any _WhitespaceTokens from the output.
Here is the function:
def _tokenize(data: bytes, skip_ws: bool):
    """
    A generator that produces _Token instances from Type-1 font code.

    The consumer of the generator may send an integer to the tokenizer to
    indicate that the next token should be _BinaryToken of the given length.

    Parameters
    ----------
    data : bytes
        The data of the font to tokenize.
    skip_ws : bool
        If true, the generator will drop any _WhitespaceTokens from the output.
    """
    text = data.decode('ascii', 'replace')
    # Lexical classes: whitespace/%-comments, ordinary tokens, characters
    # significant inside (...) strings, well-formed hex strings, and octal
    # escape digits.
    whitespace_or_comment_re = re.compile(r'[\0\t\r\f\n ]+|%[^\r\n]*')
    token_re = re.compile(r'/{0,2}[^]\0\t\r\f\n ()<>{}/%[]+')
    instring_re = re.compile(r'[()\\]')
    hex_re = re.compile(r'^<[0-9a-fA-F\0\t\r\f\n ]*>$')
    oct_re = re.compile(r'[0-7]{1,3}')
    pos = 0
    # Length of raw binary data requested by the consumer via send(), or
    # None to tokenize normally.
    next_binary = None
    while pos < len(text):
        if next_binary is not None:
            n = next_binary
            next_binary = (yield _BinaryToken(pos, data[pos:pos+n]))
            pos += n
            continue
        match = whitespace_or_comment_re.match(text, pos)
        if match:
            if not skip_ws:
                next_binary = (yield _WhitespaceToken(pos, match.group()))
            pos = match.end()
        elif text[pos] == '(':
            # PostScript string rules:
            # - parentheses must be balanced
            # - backslashes escape backslashes and parens
            # - also codes \n\r\t\b\f and octal escapes are recognized
            # - other backslashes do not escape anything
            start = pos
            pos += 1
            depth = 1
            while depth:
                match = instring_re.search(text, pos)
                if match is None:
                    raise ValueError(
                        f'Unterminated string starting at {start}')
                pos = match.end()
                if match.group() == '(':
                    depth += 1
                elif match.group() == ')':
                    depth -= 1
                else:  # a backslash
                    char = text[pos]
                    if char in r'\()nrtbf':
                        pos += 1
                    else:
                        octal = oct_re.match(text, pos)
                        if octal:
                            pos = octal.end()
                        else:
                            pass  # non-escaping backslash
            next_binary = (yield _StringToken(start, text[start:pos]))
        elif text[pos:pos + 2] in ('<<', '>>'):
            next_binary = (yield _DelimiterToken(pos, text[pos:pos + 2]))
            pos += 2
        elif text[pos] == '<':
            # Hex string: find the closing '>' and validate the contents.
            start = pos
            try:
                pos = text.index('>', pos) + 1
            except ValueError as e:
                raise ValueError(f'Unterminated hex string starting at {start}'
                                 ) from e
            if not hex_re.match(text[start:pos]):
                raise ValueError(f'Malformed hex string starting at {start}')
            next_binary = (yield _StringToken(pos, text[start:pos]))
        else:
            match = token_re.match(text, pos)
            if match:
                raw = match.group()
                if raw.startswith('/'):
                    next_binary = (yield _NameToken(pos, raw))
                elif match.group() in ('true', 'false'):
                    next_binary = (yield _BooleanToken(pos, raw))
                else:
                    # Anything float() accepts is a number; everything
                    # else is an executable keyword.
                    try:
                        float(raw)
                        next_binary = (yield _NumberToken(pos, raw))
                    except ValueError:
                        next_binary = (yield _KeywordToken(pos, raw))
                pos = match.end()
            else:
                # A single character that matched no other class is a
                # one-character delimiter.
                next_binary = (yield _DelimiterToken(pos, text[pos]))
                pos += 1
170,961 | import binascii
import functools
import logging
import re
import string
import struct
import numpy as np
from matplotlib.cbook import _format_approx
from . import _api
class _BalancedExpression(_Token):
    """A delimiter-balanced span of tokens, kept as its raw source text."""
    pass
The provided code snippet includes necessary dependencies for implementing the `_expression` function. Write a Python function `def _expression(initial, tokens, data)` to solve the following problem:
Consume some number of tokens and return a balanced PostScript expression. Parameters ---------- initial : _Token The token that triggered parsing a balanced expression. tokens : iterator of _Token Following tokens. data : bytes Underlying data that the token positions point to. Returns ------- _BalancedExpression
Here is the function:
def _expression(initial, tokens, data):
    """
    Consume some number of tokens and return a balanced PostScript expression.

    Parameters
    ----------
    initial : _Token
        The token that triggered parsing a balanced expression.
    tokens : iterator of _Token
        Following tokens.
    data : bytes
        Underlying data that the token positions point to.

    Returns
    -------
    _BalancedExpression
    """
    # Stack of currently-open delimiters; the expression ends when the
    # stack empties (immediately, if *initial* is not a delimiter).
    delim_stack = []
    token = initial
    while True:
        if token.is_delim():
            if token.raw in ('[', '{'):
                delim_stack.append(token)
            elif token.raw in (']', '}'):
                if not delim_stack:
                    raise RuntimeError(f"unmatched closing token {token}")
                match = delim_stack.pop()
                if match.raw != token.opposite():
                    raise RuntimeError(
                        f"opening token {match} closed by {token}"
                    )
                if not delim_stack:
                    break
            else:
                raise RuntimeError(f'unknown delimiter {token}')
        elif not delim_stack:
            # A bare non-delimiter token is an expression by itself.
            break
        token = next(tokens)
    # Return the raw source text covered by the expression.
    return _BalancedExpression(
        initial.pos,
        data[initial.pos:token.endpos()].decode('ascii', 'replace')
    )
170,962 |
The provided code snippet includes necessary dependencies for implementing the `blocking_input_loop` function. Write a Python function `def blocking_input_loop(figure, event_names, timeout, handler)` to solve the following problem:
Run *figure*'s event loop while listening to interactive events. The events listed in *event_names* are passed to *handler*. This function is used to implement `.Figure.waitforbuttonpress`, `.Figure.ginput`, and `.Axes.clabel`. Parameters ---------- figure : `~matplotlib.figure.Figure` event_names : list of str The names of the events passed to *handler*. timeout : float If positive, the event loop is stopped after *timeout* seconds. handler : Callable[[Event], Any] Function called for each event; it can force an early exit of the event loop by calling ``canvas.stop_event_loop()``.
Here is the function:
def blocking_input_loop(figure, event_names, timeout, handler):
    """
    Run *figure*'s event loop while listening to interactive events.

    The events listed in *event_names* are passed to *handler*.

    This function is used to implement `.Figure.waitforbuttonpress`,
    `.Figure.ginput`, and `.Axes.clabel`.

    Parameters
    ----------
    figure : `~matplotlib.figure.Figure`
    event_names : list of str
        The names of the events passed to *handler*.
    timeout : float
        If positive, the event loop is stopped after *timeout* seconds.
    handler : Callable[[Event], Any]
        Function called for each event; it can force an early exit of the
        event loop by calling ``canvas.stop_event_loop()``.
    """
    # Make sure a managed figure is actually visible before blocking.
    if figure.canvas.manager:
        figure.show()
    # Register *handler* for every requested event type.
    cids = [figure.canvas.mpl_connect(name, handler)
            for name in event_names]
    try:
        figure.canvas.start_event_loop(timeout)
    finally:
        # Always disconnect, even when interrupted (e.g. by ctrl-c).
        for cid in cids:
            figure.canvas.mpl_disconnect(cid)
170,963 | from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.

    It modifies fig.bbox, fig.bbox_inches,
    fig.transFigure._boxout, and fig.patch.  While the figure size
    changes, the scale of the original figure is conserved.  A
    function which restores the original values are returned.
    """
    # Remember everything that will be mutated so restore_bbox can undo it.
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    _boxout = fig.transFigure._boxout
    old_aspect = []
    locator_list = []
    sentinel = object()  # marks Axes that had no instance-level apply_aspect
    for ax in fig.axes:
        locator_list.append(ax.get_axes_locator())
        current_pos = ax.get_position(original=False).frozen()
        # Freeze each Axes at its current position so relayouting the
        # figure below does not move it.
        ax.set_axes_locator(lambda a, r, _pos=current_pos: _pos)
        # override the method that enforces the aspect ratio on the Axes
        if 'apply_aspect' in ax.__dict__:
            old_aspect.append(ax.apply_aspect)
        else:
            old_aspect.append(sentinel)
        ax.apply_aspect = lambda pos=None: None

    def restore_bbox():
        # Undo all the temporary mutations made above.
        for ax, loc, aspect in zip(fig.axes, locator_list, old_aspect):
            ax.set_axes_locator(loc)
            if aspect is sentinel:
                # delete our no-op function which un-hides the original method
                del ax.apply_aspect
            else:
                ax.apply_aspect = aspect
        fig.bbox = origBbox
        fig.bbox_inches = origBboxInches
        fig.transFigure._boxout = _boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)

    if fixed_dpi is None:
        fixed_dpi = fig.dpi
    # Inches -> output pixels at the requested dpi.
    tr = Affine2D().scale(fixed_dpi)
    dpi_scale = fixed_dpi / fig.dpi
    fig.bbox_inches = Bbox.from_bounds(0, 0, *bbox_inches.size)
    x0, y0 = tr.transform(bbox_inches.p0)
    w1, h1 = fig.bbox.size * dpi_scale
    # Shift the figure's box so the requested region lands at the origin
    # of the output canvas.
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, tr)
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)
    return restore_bbox
The provided code snippet includes necessary dependencies for implementing the `process_figure_for_rasterizing` function. Write a Python function `def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None)` to solve the following problem:
A function that needs to be called when the figure dpi changes during drawing (e.g., rasterizing). It recovers the bbox and re-adjusts it with the new dpi.
Here is the function:
def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
    """
    A function that needs to be called when figure dpi changes during the
    drawing (e.g., rasterizing).  It recovers the bbox and re-adjust it with
    the new dpi.
    """
    bbox_inches, restore = bbox_inches_restore
    # Undo the previous adjustment, then re-apply it at the new dpi.
    restore()
    return bbox_inches, adjust_bbox(fig, bbox_inches, fixed_dpi)
170,964 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
The provided code snippet includes necessary dependencies for implementing the `_safe_pyplot_import` function. Write a Python function `def _safe_pyplot_import()` to solve the following problem:
Import and return ``pyplot``, correctly setting the backend if one is already forced.
Here is the function:
def _safe_pyplot_import():
    """
    Import and return ``pyplot``, correctly setting the backend if one is
    already forced.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # Likely due to a framework mismatch.
        current_framework = cbook._get_running_interactive_framework()
        if current_framework is None:
            raise  # No, something else went wrong, likely with the install...
        # Map the detected GUI framework to a compatible Agg-based backend.
        backend_mapping = {
            'qt': 'qtagg',
            'gtk3': 'gtk3agg',
            'gtk4': 'gtk4agg',
            'wx': 'wxagg',
            'tk': 'tkagg',
            'macosx': 'macosx',
            'headless': 'agg',
        }
        backend = backend_mapping[current_framework]
        # Set both the live and the "original" rcParams so later resets
        # keep the forced backend.
        rcParams["backend"] = mpl.rcParamsOrig["backend"] = backend
        import matplotlib.pyplot as plt  # Now this should succeed.
    return plt
170,965 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
# Mapping of file extension -> human-readable description of the format,
# used e.g. to describe the supported savefig file types.
_default_filetypes = {
    'eps': 'Encapsulated Postscript',
    'jpg': 'Joint Photographic Experts Group',
    'jpeg': 'Joint Photographic Experts Group',
    'pdf': 'Portable Document Format',
    'pgf': 'PGF code for LaTeX',
    'png': 'Portable Network Graphics',
    'ps': 'Postscript',
    'raw': 'Raw RGBA bitmap',
    'rgba': 'Raw RGBA bitmap',
    'svg': 'Scalable Vector Graphics',
    'svgz': 'Scalable Vector Graphics',
    'tif': 'Tagged Image File Format',
    'tiff': 'Tagged Image File Format',
    'webp': 'WebP Image Format',
}
# Mapping of file extension -> module path of the backend that renders it.
_default_backends = {
    'eps': 'matplotlib.backends.backend_ps',
    'jpg': 'matplotlib.backends.backend_agg',
    'jpeg': 'matplotlib.backends.backend_agg',
    'pdf': 'matplotlib.backends.backend_pdf',
    'pgf': 'matplotlib.backends.backend_pgf',
    'png': 'matplotlib.backends.backend_agg',
    'ps': 'matplotlib.backends.backend_ps',
    'raw': 'matplotlib.backends.backend_agg',
    'rgba': 'matplotlib.backends.backend_agg',
    'svg': 'matplotlib.backends.backend_svg',
    'svgz': 'matplotlib.backends.backend_svg',
    'tif': 'matplotlib.backends.backend_agg',
    'tiff': 'matplotlib.backends.backend_agg',
    'webp': 'matplotlib.backends.backend_agg',
}
The provided code snippet includes necessary dependencies for implementing the `register_backend` function. Write a Python function `def register_backend(format, backend, description=None)` to solve the following problem:
Register a backend for saving to a given file format. Parameters ---------- format : str File extension backend : module string or canvas class Backend for handling file output description : str, default: "" Description of the file type.
Here is the function:
def register_backend(format, backend, description=None):
    """
    Register a backend for saving to a given file format.

    Parameters
    ----------
    format : str
        File extension
    backend : module string or canvas class
        Backend for handling file output
    description : str, default: ""
        Description of the file type.
    """
    _default_backends[format] = backend
    # An omitted description is recorded as the empty string.
    _default_filetypes[format] = '' if description is None else description
170,966 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
_default_backends = {
'eps': 'matplotlib.backends.backend_ps',
'jpg': 'matplotlib.backends.backend_agg',
'jpeg': 'matplotlib.backends.backend_agg',
'pdf': 'matplotlib.backends.backend_pdf',
'pgf': 'matplotlib.backends.backend_pgf',
'png': 'matplotlib.backends.backend_agg',
'ps': 'matplotlib.backends.backend_ps',
'raw': 'matplotlib.backends.backend_agg',
'rgba': 'matplotlib.backends.backend_agg',
'svg': 'matplotlib.backends.backend_svg',
'svgz': 'matplotlib.backends.backend_svg',
'tif': 'matplotlib.backends.backend_agg',
'tiff': 'matplotlib.backends.backend_agg',
'webp': 'matplotlib.backends.backend_agg',
}
The provided code snippet includes necessary dependencies for implementing the `get_registered_canvas_class` function. Write a Python function `def get_registered_canvas_class(format)` to solve the following problem:
Return the registered default canvas for given file format. Handles deferred import of required backend.
Here is the function:
def get_registered_canvas_class(format):
    """
    Return the registered default canvas for given file format.

    Handles deferred import of required backend.
    """
    try:
        canvas_class = _default_backends[format]
    except KeyError:
        return None
    if isinstance(canvas_class, str):
        # Lazy entry: resolve the module path to its FigureCanvas class and
        # cache the class so the import only happens once.
        canvas_class = importlib.import_module(canvas_class).FigureCanvas
        _default_backends[format] = canvas_class
    return canvas_class
170,967 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
def _key_handler(event):
# Dead reckoning of key.
if event.name == "key_press_event":
event.canvas._key = event.key
elif event.name == "key_release_event":
event.canvas._key = None | null |
170,968 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
class LocationEvent(Event):
def __init__(self, name, canvas, x, y, guiEvent=None, *, modifiers=None):
def _mouse_handler(event):
    # Low-level mouse-event post-processing: dead-reckon the held button and
    # key onto the event, and synthesize axes_enter/axes_leave events when
    # the pointer crosses Axes boundaries during motion.
    # Dead-reckoning of button and key.
    if event.name == "button_press_event":
        event.canvas._button = event.button
    elif event.name == "button_release_event":
        event.canvas._button = None
    elif event.name == "motion_notify_event" and event.button is None:
        # Motion events from the GUI toolkit don't carry the held button;
        # reuse the last one recorded on the canvas.
        event.button = event.canvas._button
    if event.key is None:
        # Likewise backfill the currently-held key (tracked by _key_handler).
        event.key = event.canvas._key
    # Emit axes_enter/axes_leave.
    if event.name == "motion_notify_event":
        last = LocationEvent.lastevent
        last_axes = last.inaxes if last is not None else None
        if last_axes != event.inaxes:
            if last_axes is not None:
                try:
                    last.canvas.callbacks.process("axes_leave_event", last)
                except Exception:
                    pass  # The last canvas may already have been torn down.
            if event.inaxes is not None:
                event.canvas.callbacks.process("axes_enter_event", event)
        # Remember this event globally so the next motion event can compare
        # Axes; clear it when the pointer leaves the figure entirely.
        LocationEvent.lastevent = (
            None if event.name == "figure_leave_event" else event)
170,969 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
def _no_output_draw(figure):
# _no_output_draw was promoted to the figure level, but
# keep this here in case someone was calling it...
figure.draw_without_rendering() | null |
170,970 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
The provided code snippet includes necessary dependencies for implementing the `_is_non_interactive_terminal_ipython` function. Write a Python function `def _is_non_interactive_terminal_ipython(ip)` to solve the following problem:
Return whether we are in a terminal IPython, but non interactive. When in _terminal_ IPython, ip.parent will have and `interact` attribute, if this attribute is False we do not setup eventloop integration as the user will _not_ interact with IPython. In all other case (ZMQKernel, or is interactive), we do.
Here is the function:
def _is_non_interactive_terminal_ipython(ip):
"""
Return whether we are in a terminal IPython, but non interactive.
When in _terminal_ IPython, ip.parent will have and `interact` attribute,
if this attribute is False we do not setup eventloop integration as the
user will _not_ interact with IPython. In all other case (ZMQKernel, or is
interactive), we do.
"""
return (hasattr(ip, 'parent')
and (ip.parent is not None)
and getattr(ip.parent, 'interact', None) is False) | Return whether we are in a terminal IPython, but non interactive. When in _terminal_ IPython, ip.parent will have and `interact` attribute, if this attribute is False we do not setup eventloop integration as the user will _not_ interact with IPython. In all other case (ZMQKernel, or is interactive), we do. |
170,971 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
_log = logging.getLogger(__name__)
class Gcf:
"""
Singleton to maintain the relation between figures and their managers, and
keep track of and "active" figure and manager.
The canvas of a figure created through pyplot is associated with a figure
manager, which handles the interaction between the figure and the backend.
pyplot keeps track of figure managers using an identifier, the "figure
number" or "manager number" (which can actually be any hashable value);
this number is available as the :attr:`number` attribute of the manager.
This class is never instantiated; it consists of an `OrderedDict` mapping
figure/manager numbers to managers, and a set of class methods that
manipulate this `OrderedDict`.
Attributes
----------
figs : OrderedDict
`OrderedDict` mapping numbers to managers; the active manager is at the
end.
"""
figs = OrderedDict()
def get_fig_manager(cls, num):
"""
If manager number *num* exists, make it the active one and return it;
otherwise return *None*.
"""
manager = cls.figs.get(num, None)
if manager is not None:
cls.set_active(manager)
return manager
def destroy(cls, num):
"""
Destroy manager *num* -- either a manager instance or a manager number.
In the interactive backends, this is bound to the window "destroy" and
"delete" events.
It is recommended to pass a manager instance, to avoid confusion when
two managers share the same number.
"""
if all(hasattr(num, attr) for attr in ["num", "destroy"]):
manager = num
if cls.figs.get(manager.num) is manager:
cls.figs.pop(manager.num)
else:
try:
manager = cls.figs.pop(num)
except KeyError:
return
if hasattr(manager, "_cidgcf"):
manager.canvas.mpl_disconnect(manager._cidgcf)
manager.destroy()
del manager, num
def destroy_fig(cls, fig):
"""Destroy figure *fig*."""
num = next((manager.num for manager in cls.figs.values()
if manager.canvas.figure == fig), None)
if num is not None:
cls.destroy(num)
def destroy_all(cls):
"""Destroy all figures."""
for manager in list(cls.figs.values()):
manager.canvas.mpl_disconnect(manager._cidgcf)
manager.destroy()
cls.figs.clear()
def has_fignum(cls, num):
"""Return whether figure number *num* exists."""
return num in cls.figs
def get_all_fig_managers(cls):
"""Return a list of figure managers."""
return list(cls.figs.values())
def get_num_fig_managers(cls):
"""Return the number of figures being managed."""
return len(cls.figs)
def get_active(cls):
"""Return the active manager, or *None* if there is no manager."""
return next(reversed(cls.figs.values())) if cls.figs else None
def _set_new_active_manager(cls, manager):
"""Adopt *manager* into pyplot and make it the active manager."""
if not hasattr(manager, "_cidgcf"):
manager._cidgcf = manager.canvas.mpl_connect(
"button_press_event", lambda event: cls.set_active(manager))
fig = manager.canvas.figure
fig.number = manager.num
label = fig.get_label()
if label:
manager.set_window_title(label)
cls.set_active(manager)
def set_active(cls, manager):
"""Make *manager* the active manager."""
cls.figs[manager.num] = manager
cls.figs.move_to_end(manager.num)
def draw_all(cls, force=False):
"""
Redraw all stale managed figures, or, if *force* is True, all managed
figures.
"""
for manager in cls.get_all_fig_managers():
if force or manager.canvas.figure.stale:
manager.canvas.draw_idle()
The provided code snippet includes necessary dependencies for implementing the `key_press_handler` function. Write a Python function `def key_press_handler(event, canvas=None, toolbar=None)` to solve the following problem:
Implement the default Matplotlib key bindings for the canvas and toolbar described at :ref:`key-event-handling`. Parameters ---------- event : `KeyEvent` A key press/release event. canvas : `FigureCanvasBase`, default: ``event.canvas`` The backend-specific canvas instance. This parameter is kept for back-compatibility, but, if set, should always be equal to ``event.canvas``. toolbar : `NavigationToolbar2`, default: ``event.canvas.toolbar`` The navigation cursor toolbar. This parameter is kept for back-compatibility, but, if set, should always be equal to ``event.canvas.toolbar``.
Here is the function:
def key_press_handler(event, canvas=None, toolbar=None):
    """
    Implement the default Matplotlib key bindings for the canvas and toolbar
    described at :ref:`key-event-handling`.

    Parameters
    ----------
    event : `KeyEvent`
        A key press/release event.
    canvas : `FigureCanvasBase`, default: ``event.canvas``
        The backend-specific canvas instance. This parameter is kept for
        back-compatibility, but, if set, should always be equal to
        ``event.canvas``.
    toolbar : `NavigationToolbar2`, default: ``event.canvas.toolbar``
        The navigation cursor toolbar. This parameter is kept for
        back-compatibility, but, if set, should always be equal to
        ``event.canvas.toolbar``.
    """
    # these bindings happen whether you are over an Axes or not
    if event.key is None:
        return
    if canvas is None:
        canvas = event.canvas
    if toolbar is None:
        toolbar = canvas.toolbar
    # Load key-mappings from rcParams.
    fullscreen_keys = rcParams['keymap.fullscreen']
    home_keys = rcParams['keymap.home']
    back_keys = rcParams['keymap.back']
    forward_keys = rcParams['keymap.forward']
    pan_keys = rcParams['keymap.pan']
    zoom_keys = rcParams['keymap.zoom']
    save_keys = rcParams['keymap.save']
    quit_keys = rcParams['keymap.quit']
    quit_all_keys = rcParams['keymap.quit_all']
    grid_keys = rcParams['keymap.grid']
    grid_minor_keys = rcParams['keymap.grid_minor']
    toggle_yscale_keys = rcParams['keymap.yscale']
    toggle_xscale_keys = rcParams['keymap.xscale']
    # toggle fullscreen mode ('f', 'ctrl + f')
    if event.key in fullscreen_keys:
        try:
            canvas.manager.full_screen_toggle()
        except AttributeError:
            # Not every canvas has a manager, nor every manager fullscreen.
            pass
    # quit the figure (default key 'ctrl+w')
    if event.key in quit_keys:
        Gcf.destroy_fig(canvas.figure)
    if event.key in quit_all_keys:
        Gcf.destroy_all()
    if toolbar is not None:
        # home or reset mnemonic (default key 'h', 'home' and 'r')
        if event.key in home_keys:
            toolbar.home()
        # forward / backward keys to enable left handed quick navigation
        # (default key for backward: 'left', 'backspace' and 'c')
        elif event.key in back_keys:
            toolbar.back()
        # (default key for forward: 'right' and 'v')
        elif event.key in forward_keys:
            toolbar.forward()
        # pan mnemonic (default key 'p')
        elif event.key in pan_keys:
            toolbar.pan()
            toolbar._update_cursor(event)
        # zoom mnemonic (default key 'o')
        elif event.key in zoom_keys:
            toolbar.zoom()
            toolbar._update_cursor(event)
        # saving current figure (default key 's')
        elif event.key in save_keys:
            toolbar.save_figure()
    if event.inaxes is None:
        return

    # these bindings require the mouse to be over an Axes to trigger
    def _get_uniform_gridstate(ticks):
        # Return True/False if all grid lines are on or off, None if they are
        # not all in the same state.
        if all(tick.gridline.get_visible() for tick in ticks):
            return True
        elif not any(tick.gridline.get_visible() for tick in ticks):
            return False
        else:
            return None

    ax = event.inaxes
    # toggle major grids in current Axes (default key 'g')
    # Both here and below (for 'G'), we do nothing if *any* grid (major or
    # minor, x or y) is not in a uniform state, to avoid messing up user
    # customization.
    if (event.key in grid_keys
            # Exclude minor grids not in a uniform state.
            and None not in [_get_uniform_gridstate(ax.xaxis.minorTicks),
                             _get_uniform_gridstate(ax.yaxis.minorTicks)]):
        x_state = _get_uniform_gridstate(ax.xaxis.majorTicks)
        y_state = _get_uniform_gridstate(ax.yaxis.majorTicks)
        # Cycle through the four on/off combinations of x/y major grids.
        cycle = [(False, False), (True, False), (True, True), (False, True)]
        try:
            x_state, y_state = (
                cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
        except ValueError:
            # Exclude major grids not in a uniform state.
            pass
        else:
            # If turning major grids off, also turn minor grids off.
            ax.grid(x_state, which="major" if x_state else "both", axis="x")
            ax.grid(y_state, which="major" if y_state else "both", axis="y")
            canvas.draw_idle()
    # toggle major and minor grids in current Axes (default key 'G')
    # NOTE(review): the scale toggles below chain off *this* `if` via elif,
    # so 'l'/'k' are only examined when the 'G' condition is false; this
    # mirrors the upstream structure — confirm intentional.
    if (event.key in grid_minor_keys
            # Exclude major grids not in a uniform state.
            and None not in [_get_uniform_gridstate(ax.xaxis.majorTicks),
                             _get_uniform_gridstate(ax.yaxis.majorTicks)]):
        x_state = _get_uniform_gridstate(ax.xaxis.minorTicks)
        y_state = _get_uniform_gridstate(ax.yaxis.minorTicks)
        cycle = [(False, False), (True, False), (True, True), (False, True)]
        try:
            x_state, y_state = (
                cycle[(cycle.index((x_state, y_state)) + 1) % len(cycle)])
        except ValueError:
            # Exclude minor grids not in a uniform state.
            pass
        else:
            ax.grid(x_state, which="both", axis="x")
            ax.grid(y_state, which="both", axis="y")
            canvas.draw_idle()
    # toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
    elif event.key in toggle_yscale_keys:
        scale = ax.get_yscale()
        if scale == 'log':
            ax.set_yscale('linear')
            ax.figure.canvas.draw_idle()
        elif scale == 'linear':
            try:
                ax.set_yscale('log')
            except ValueError as exc:
                # Log scale may be invalid (e.g. nonpositive limits); warn
                # and fall back to linear.
                _log.warning(str(exc))
                ax.set_yscale('linear')
            ax.figure.canvas.draw_idle()
    # toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
    elif event.key in toggle_xscale_keys:
        scalex = ax.get_xscale()
        if scalex == 'log':
            ax.set_xscale('linear')
            ax.figure.canvas.draw_idle()
        elif scalex == 'linear':
            try:
                ax.set_xscale('log')
            except ValueError as exc:
                _log.warning(str(exc))
                ax.set_xscale('linear')
            ax.figure.canvas.draw_idle()
170,972 | from collections import namedtuple
from contextlib import ExitStack, contextmanager, nullcontext
from enum import Enum, IntEnum
import functools
import importlib
import inspect
import io
import itertools
import logging
import os
import sys
import time
from weakref import WeakKeyDictionary
import numpy as np
import matplotlib as mpl
from matplotlib import (
_api, backend_tools as tools, cbook, colors, _docstring, text,
_tight_bbox, transforms, widgets, get_backend, is_interactive, rcParams)
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import _setattr_cm
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib._enums import JoinStyle, CapStyle
class MouseButton(IntEnum):
LEFT = 1
MIDDLE = 2
RIGHT = 3
BACK = 8
FORWARD = 9
The provided code snippet includes necessary dependencies for implementing the `button_press_handler` function. Write a Python function `def button_press_handler(event, canvas=None, toolbar=None)` to solve the following problem:
The default Matplotlib button actions for extra mouse buttons. Parameters are as for `key_press_handler`, except that *event* is a `MouseEvent`.
Here is the function:
def button_press_handler(event, canvas=None, toolbar=None):
    """
    The default Matplotlib button actions for extra mouse buttons.

    Parameters are as for `key_press_handler`, except that *event* is a
    `MouseEvent`.
    """
    canvas = canvas if canvas is not None else event.canvas
    toolbar = toolbar if toolbar is not None else canvas.toolbar
    if toolbar is None:
        return
    # Map the pressed button onto its keymap name (e.g. "MouseButton.BACK").
    button_name = str(MouseButton(event.button))
    if button_name in rcParams['keymap.back']:
        toolbar.back()
    elif button_name in rcParams['keymap.forward']:
        toolbar.forward()
170,973 | from collections import namedtuple
import contextlib
from functools import lru_cache, wraps
import inspect
from inspect import Signature, Parameter
import logging
from numbers import Number
import re
import warnings
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .colors import BoundaryNorm
from .cm import ScalarMappable
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...
def _prevent_rasterization(draw):
# We assume that by default artists are not allowed to rasterize (unless
# its draw method is explicitly decorated). If it is being drawn after a
# rasterized artist and it has reached a raster_depth of 0, we stop
# rasterization so that it does not affect the behavior of normal artist
# (e.g., change in dpi).
@wraps(draw)
def draw_wrapper(artist, renderer, *args, **kwargs):
if renderer._raster_depth == 0 and renderer._rasterizing:
# Only stop when we are not in a rasterized parent
# and something has been rasterized since last stop.
renderer.stop_rasterizing()
renderer._rasterizing = False
return draw(artist, renderer, *args, **kwargs)
draw_wrapper._supports_rasterization = False
return draw_wrapper | null |
170,974 | from collections import namedtuple
import contextlib
from functools import lru_cache, wraps
import inspect
from inspect import Signature, Parameter
import logging
from numbers import Number
import re
import warnings
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .colors import BoundaryNorm
from .cm import ScalarMappable
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ...
The provided code snippet includes necessary dependencies for implementing the `_finalize_rasterization` function. Write a Python function `def _finalize_rasterization(draw)` to solve the following problem:
Decorator for Artist.draw method. Needed on the outermost artist, i.e. Figure, to finish up if the render is still in rasterized mode.
Here is the function:
def _finalize_rasterization(draw):
"""
Decorator for Artist.draw method. Needed on the outermost artist, i.e.
Figure, to finish up if the render is still in rasterized mode.
"""
@wraps(draw)
def draw_wrapper(artist, renderer, *args, **kwargs):
result = draw(artist, renderer, *args, **kwargs)
if renderer._rasterizing:
renderer.stop_rasterizing()
renderer._rasterizing = False
return result
return draw_wrapper | Decorator for Artist.draw method. Needed on the outermost artist, i.e. Figure, to finish up if the render is still in rasterized mode. |
170,975 | from collections import namedtuple
import contextlib
from functools import lru_cache, wraps
import inspect
from inspect import Signature, Parameter
import logging
from numbers import Number
import re
import warnings
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .colors import BoundaryNorm
from .cm import ScalarMappable
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
def _stale_axes_callback(self, val):
if self.axes:
self.axes.stale = val | null |
170,976 | from collections import namedtuple
import contextlib
from functools import lru_cache, wraps
import inspect
from inspect import Signature, Parameter
import logging
from numbers import Number
import re
import warnings
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .colors import BoundaryNorm
from .cm import ScalarMappable
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
class ArtistInspector:
"""
A helper class to inspect an `~matplotlib.artist.Artist` and return
information about its settable properties and their current values.
"""
def __init__(self, o):
r"""
Initialize the artist inspector with an `Artist` or an iterable of
`Artist`\s. If an iterable is used, we assume it is a homogeneous
sequence (all `Artist`\s are of the same type) and it is your
responsibility to make sure this is so.
"""
if not isinstance(o, Artist):
if np.iterable(o):
o = list(o)
if len(o):
o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping property fullnames to sets of aliases for each alias
in the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': {'mfc'},
'linewidth' : {'lw'},
}
"""
names = [name for name in dir(self.o)
if name.startswith(('set_', 'get_'))
and callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
propname = re.search("`({}.*)`".format(name[:4]), # get_.*/set_.*
inspect.getdoc(func)).group(1)
aliases.setdefault(propname[4:], set()).add(name[4:])
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*(?:\.\.\s+)?ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
)
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the setter for a line that
begins with "ACCEPTS:" or ".. ACCEPTS:", and then by looking for a
numpydoc-style documentation for the setter's first argument.
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s' % (self.o, name))
func = getattr(self.o, name)
docstring = inspect.getdoc(func)
if docstring is None:
return 'unknown'
if docstring.startswith('Alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return re.sub("\n *", " ", match.group(1))
# Much faster than list(inspect.signature(func).parameters)[1],
# although barely relevant wrt. matplotlib's total import time.
param_name = func.__code__.co_varnames[1]
# We could set the presence * based on whether the parameter is a
# varargs (it can't be a varkwargs) but it's not really worth it.
match = re.search(r"(?m)^ *\*?{} : (.+)".format(param_name), docstring)
if match:
return match.group(1)
return 'unknown'
def _replace_path(self, source_class):
"""
Changes the full path to the public API path that is used
in sphinx. This is needed for links to work.
"""
replace_dict = {'_base._AxesBase': 'Axes',
'_axes.Axes': 'Axes'}
for key, value in replace_dict.items():
source_class = source_class.replace(key, value)
return source_class
def get_setters(self):
"""
Get the attribute strings with setters for object.
For example, for a line, return ``['markerfacecolor', 'linewidth',
....]``.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
func = getattr(self.o, name)
if (not callable(func)
or self.number_of_parameters(func) < 2
or self.is_alias(func)):
continue
setters.append(name[4:])
return setters
def number_of_parameters(func):
"""Return number of parameters of the callable *func*."""
return len(inspect.signature(func).parameters)
def is_alias(method):
"""
Return whether the object *method* is an alias for another method.
"""
ds = inspect.getdoc(method)
if ds is None:
return False
return ds.startswith('Alias for ')
def aliased_name(self, s):
"""
Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME'.
For example, for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'.
"""
aliases = ''.join(' or %s' % x for x in sorted(self.aliasd.get(s, [])))
return s + aliases
_NOT_LINKABLE = {
# A set of property setter methods that are not available in our
# current docs. This is a workaround used to prevent trying to link
# these setters which would lead to "target reference not found"
# warnings during doc build.
'matplotlib.image._ImageBase.set_alpha',
'matplotlib.image._ImageBase.set_array',
'matplotlib.image._ImageBase.set_data',
'matplotlib.image._ImageBase.set_filternorm',
'matplotlib.image._ImageBase.set_filterrad',
'matplotlib.image._ImageBase.set_interpolation',
'matplotlib.image._ImageBase.set_interpolation_stage',
'matplotlib.image._ImageBase.set_resample',
'matplotlib.text._AnnotationBase.set_annotation_clip',
}
def aliased_name_rest(self, s, target):
"""
Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME',
formatted for reST.
For example, for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'.
"""
# workaround to prevent "reference target not found"
if target in self._NOT_LINKABLE:
return f'``{s}``'
aliases = ''.join(' or %s' % x for x in sorted(self.aliasd.get(s, [])))
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable
properties and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
lines = []
for prop in sorted(self.get_setters()):
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=4):
"""
If *prop* is *None*, return a list of reST-formatted strings of all
settable properties and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of "property : valid"
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
prop_and_qualnames = []
for prop in sorted(self.get_setters()):
# Find the parent method which actually provides the docstring.
for cls in self.o.__mro__:
method = getattr(cls, f"set_{prop}", None)
if method and method.__doc__ is not None:
break
else: # No docstring available.
method = getattr(self.o, f"set_{prop}")
prop_and_qualnames.append(
(prop, f"{method.__module__}.{method.__qualname__}"))
names = [self.aliased_name_rest(prop, target)
.replace('_base._AxesBase', 'Axes')
.replace('_axes.Axes', 'Axes')
for prop, target in prop_and_qualnames]
accepts = [self.get_valid_values(prop)
for prop, _ in prop_and_qualnames]
col0_len = max(len(n) for n in names)
col1_len = max(len(a) for a in accepts)
table_formatstr = pad + ' ' + '=' * col0_len + ' ' + '=' * col1_len
return [
'',
pad + '.. table::',
pad + ' :class: property-table',
'',
table_formatstr,
pad + ' ' + 'Property'.ljust(col0_len)
+ ' ' + 'Description'.ljust(col1_len),
table_formatstr,
*[pad + ' ' + n.ljust(col0_len) + ' ' + a.ljust(col1_len)
for n, a in zip(names, accepts)],
table_formatstr,
'',
]
def properties(self):
    """Return a dictionary mapping property name -> value."""
    artist = self.oorig
    props = {}
    # Probe every callable ``get_*`` attribute, skipping aliases; dir()
    # already yields names in sorted order.
    for attr in sorted(dir(artist)):
        if not attr.startswith('get_'):
            continue
        getter = getattr(artist, attr)
        if not callable(getter) or self.is_alias(getter):
            continue
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                value = getter()
        except Exception:
            # Getters that fail (e.g. require state not yet set) are
            # silently skipped, matching the best-effort contract.
            continue
        props[attr[4:]] = value
    return props
def pprint_getters(self):
    """Return the getters and actual values as list of strings."""
    lines = []
    for prop, value in sorted(self.properties().items()):
        # Abbreviate long array-likes to their first six entries.
        if getattr(value, 'shape', ()) != () and len(value) > 6:
            text = str(value[:6]) + '...'
        else:
            text = str(value)
        # Collapse newlines and clip overly long reprs.
        text = text.replace('\n', ' ')
        if len(text) > 50:
            text = text[:50] + '...'
        lines.append(' %s = %s' % (self.aliased_name(prop), text))
    return lines
The provided code snippet includes necessary dependencies for implementing the `getp` function. Write a Python function `def getp(obj, property=None)` to solve the following problem:
Return the value of an `.Artist`'s *property*, or print all of them. Parameters ---------- obj : `.Artist` The queried artist; e.g., a `.Line2D`, a `.Text`, or an `~.axes.Axes`. property : str or None, default: None If *property* is 'somename', this function returns ``obj.get_somename()``. If it's None (or unset), it *prints* all gettable properties from *obj*. Many properties have aliases for shorter typing, e.g. 'lw' is an alias for 'linewidth'. In the output, aliases and full property names will be listed as: property or alias = value e.g.: linewidth or lw = 2 See Also -------- setp
Here is the function:
def getp(obj, property=None):
    """
    Return the value of an `.Artist`'s *property*, or print all of them.

    Parameters
    ----------
    obj : `.Artist`
        The queried artist; e.g., a `.Line2D`, a `.Text`, or an
        `~.axes.Axes`.
    property : str or None, default: None
        If *property* is 'somename', this function returns
        ``obj.get_somename()``.

        If it's None (or unset), it *prints* all gettable properties from
        *obj*.  Many properties have aliases for shorter typing, e.g. 'lw'
        is an alias for 'linewidth'.  In the output, aliases and full
        property names will be listed as:

          property or alias = value

        e.g.:

          linewidth or lw = 2

    See Also
    --------
    setp
    """
    if property is not None:
        # Delegate to the artist's own getter.
        return getattr(obj, 'get_' + property)()
    # No property requested: pretty-print everything gettable.
    print('\n'.join(ArtistInspector(obj).pprint_getters()))
170,977 | from collections import namedtuple
import contextlib
from functools import lru_cache, wraps
import inspect
from inspect import Signature, Parameter
import logging
from numbers import Number
import re
import warnings
import numpy as np
import matplotlib as mpl
from . import _api, cbook
from .colors import BoundaryNorm
from .cm import ScalarMappable
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
class ArtistInspector:
    """
    A helper class to inspect an `~matplotlib.artist.Artist` and return
    information about its settable properties and their current values.
    """

    def __init__(self, o):
        r"""
        Initialize the artist inspector with an `Artist` or an iterable of
        `Artist`\s.  If an iterable is used, we assume it is a homogeneous
        sequence (all `Artist`\s are of the same type) and it is your
        responsibility to make sure this is so.
        """
        if not isinstance(o, Artist):
            if np.iterable(o):
                o = list(o)
                if len(o):
                    o = o[0]
        self.oorig = o
        # Inspection works on the class, not the instance.
        if not isinstance(o, type):
            o = type(o)
        self.o = o
        self.aliasd = self.get_aliases()

    def get_aliases(self):
        """
        Get a dict mapping property fullnames to sets of aliases for each
        alias in the :class:`~matplotlib.artist.ArtistInspector`.

        e.g., for lines::

          {'markerfacecolor': {'mfc'},
           'linewidth': {'lw'}}
        """
        names = [name for name in dir(self.o)
                 if name.startswith(('set_', 'get_'))
                 and callable(getattr(self.o, name))]
        aliases = {}
        for name in names:
            func = getattr(self.o, name)
            if not self.is_alias(func):
                continue
            # The alias docstring names the real property, e.g.
            # "Alias for `set_linewidth`".
            propname = re.search("`({}.*)`".format(name[:4]),  # get_.*/set_.*
                                 inspect.getdoc(func)).group(1)
            aliases.setdefault(propname[4:], set()).add(name[4:])
        return aliases

    _get_valid_values_regex = re.compile(
        r"\n\s*(?:\.\.\s+)?ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
    )

    def get_valid_values(self, attr):
        """
        Get the legal arguments for the setter associated with *attr*.

        This is done by querying the docstring of the setter for a line that
        begins with "ACCEPTS:" or ".. ACCEPTS:", and then by looking for a
        numpydoc-style documentation for the setter's first argument.
        """
        name = 'set_%s' % attr
        if not hasattr(self.o, name):
            raise AttributeError('%s has no function %s' % (self.o, name))
        func = getattr(self.o, name)
        docstring = inspect.getdoc(func)
        if docstring is None:
            return 'unknown'
        if docstring.startswith('Alias for '):
            return None
        match = self._get_valid_values_regex.search(docstring)
        if match is not None:
            return re.sub("\n *", " ", match.group(1))
        # Much faster than list(inspect.signature(func).parameters)[1],
        # although barely relevant wrt. matplotlib's total import time.
        param_name = func.__code__.co_varnames[1]
        # We could set the presence * based on whether the parameter is a
        # varargs (it can't be a varkwargs) but it's not really worth it.
        match = re.search(r"(?m)^ *\*?{} : (.+)".format(param_name), docstring)
        if match:
            return match.group(1)
        return 'unknown'

    def _replace_path(self, source_class):
        """
        Changes the full path to the public API path that is used
        in sphinx.  This is needed for links to work.
        """
        replace_dict = {'_base._AxesBase': 'Axes',
                        '_axes.Axes': 'Axes'}
        for key, value in replace_dict.items():
            source_class = source_class.replace(key, value)
        return source_class

    def get_setters(self):
        """
        Get the attribute strings with setters for object.

        For example, for a line, return ``['markerfacecolor', 'linewidth',
        ....]``.
        """
        setters = []
        for name in dir(self.o):
            if not name.startswith('set_'):
                continue
            func = getattr(self.o, name)
            if (not callable(func)
                    or self.number_of_parameters(func) < 2
                    or self.is_alias(func)):
                continue
            setters.append(name[4:])
        return setters

    # The two helpers below are invoked both through ``self`` (e.g. in
    # get_setters and properties) and as plain class attributes, so they
    # must be static methods: without the decorator, instance calls would
    # pass ``self`` as the sole positional argument and raise TypeError.
    @staticmethod
    def number_of_parameters(func):
        """Return number of parameters of the callable *func*."""
        return len(inspect.signature(func).parameters)

    @staticmethod
    def is_alias(method):
        """
        Return whether the object *method* is an alias for another method.
        """
        ds = inspect.getdoc(method)
        if ds is None:
            return False
        return ds.startswith('Alias for ')

    def aliased_name(self, s):
        """
        Return 'PROPNAME or alias' if *s* has an alias, else return
        'PROPNAME'.

        For example, for the line markerfacecolor property, which has an
        alias, return 'markerfacecolor or mfc' and for the transform
        property, which does not, return 'transform'.
        """
        aliases = ''.join(' or %s' % x for x in sorted(self.aliasd.get(s, [])))
        return s + aliases

    _NOT_LINKABLE = {
        # A set of property setter methods that are not available in our
        # current docs. This is a workaround used to prevent trying to link
        # these setters which would lead to "target reference not found"
        # warnings during doc build.
        'matplotlib.image._ImageBase.set_alpha',
        'matplotlib.image._ImageBase.set_array',
        'matplotlib.image._ImageBase.set_data',
        'matplotlib.image._ImageBase.set_filternorm',
        'matplotlib.image._ImageBase.set_filterrad',
        'matplotlib.image._ImageBase.set_interpolation',
        'matplotlib.image._ImageBase.set_interpolation_stage',
        'matplotlib.image._ImageBase.set_resample',
        'matplotlib.text._AnnotationBase.set_annotation_clip',
    }

    def aliased_name_rest(self, s, target):
        """
        Return 'PROPNAME or alias' if *s* has an alias, else return
        'PROPNAME', formatted for reST.

        For example, for the line markerfacecolor property, which has an
        alias, return 'markerfacecolor or mfc' and for the transform
        property, which does not, return 'transform'.
        """
        # workaround to prevent "reference target not found"
        if target in self._NOT_LINKABLE:
            return f'``{s}``'
        aliases = ''.join(' or %s' % x for x in sorted(self.aliasd.get(s, [])))
        return ':meth:`%s <%s>`%s' % (s, target, aliases)

    def pprint_setters(self, prop=None, leadingspace=2):
        """
        If *prop* is *None*, return a list of strings of all settable
        properties and their valid values.

        If *prop* is not *None*, it is a valid property name and that
        property will be returned as a string of property : valid
        values.
        """
        if leadingspace:
            pad = ' ' * leadingspace
        else:
            pad = ''
        if prop is not None:
            accepts = self.get_valid_values(prop)
            return '%s%s: %s' % (pad, prop, accepts)
        lines = []
        for prop in sorted(self.get_setters()):
            accepts = self.get_valid_values(prop)
            name = self.aliased_name(prop)
            lines.append('%s%s: %s' % (pad, name, accepts))
        return lines

    def pprint_setters_rest(self, prop=None, leadingspace=4):
        """
        If *prop* is *None*, return a list of reST-formatted strings of all
        settable properties and their valid values.

        If *prop* is not *None*, it is a valid property name and that
        property will be returned as a string of "property : valid"
        values.
        """
        if leadingspace:
            pad = ' ' * leadingspace
        else:
            pad = ''
        if prop is not None:
            accepts = self.get_valid_values(prop)
            return '%s%s: %s' % (pad, prop, accepts)
        prop_and_qualnames = []
        for prop in sorted(self.get_setters()):
            # Find the parent method which actually provides the docstring.
            for cls in self.o.__mro__:
                method = getattr(cls, f"set_{prop}", None)
                if method and method.__doc__ is not None:
                    break
            else:  # No docstring available.
                method = getattr(self.o, f"set_{prop}")
            prop_and_qualnames.append(
                (prop, f"{method.__module__}.{method.__qualname__}"))
        names = [self.aliased_name_rest(prop, target)
                 .replace('_base._AxesBase', 'Axes')
                 .replace('_axes.Axes', 'Axes')
                 for prop, target in prop_and_qualnames]
        accepts = [self.get_valid_values(prop)
                   for prop, _ in prop_and_qualnames]
        col0_len = max(len(n) for n in names)
        col1_len = max(len(a) for a in accepts)
        table_formatstr = pad + ' ' + '=' * col0_len + ' ' + '=' * col1_len
        return [
            '',
            pad + '.. table::',
            pad + ' :class: property-table',
            '',
            table_formatstr,
            pad + ' ' + 'Property'.ljust(col0_len)
            + ' ' + 'Description'.ljust(col1_len),
            table_formatstr,
            *[pad + ' ' + n.ljust(col0_len) + ' ' + a.ljust(col1_len)
              for n, a in zip(names, accepts)],
            table_formatstr,
            '',
        ]

    def properties(self):
        """Return a dictionary mapping property name -> value."""
        o = self.oorig
        getters = [name for name in dir(o)
                   if name.startswith('get_') and callable(getattr(o, name))]
        getters.sort()
        d = {}
        for name in getters:
            func = getattr(o, name)
            if self.is_alias(func):
                continue
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    val = func()
            except Exception:
                # Best-effort: getters that raise are simply omitted.
                continue
            else:
                d[name[4:]] = val
        return d

    def pprint_getters(self):
        """Return the getters and actual values as list of strings."""
        lines = []
        for name, val in sorted(self.properties().items()):
            # Abbreviate long array-likes and clip long reprs.
            if getattr(val, 'shape', ()) != () and len(val) > 6:
                s = str(val[:6]) + '...'
            else:
                s = str(val)
            s = s.replace('\n', ' ')
            if len(s) > 50:
                s = s[:50] + '...'
            name = self.aliased_name(name)
            lines.append(' %s = %s' % (name, s))
        return lines
The provided code snippet includes necessary dependencies for implementing the `kwdoc` function. Write a Python function `def kwdoc(artist)` to solve the following problem:
r""" Inspect an `~matplotlib.artist.Artist` class (using `.ArtistInspector`) and return information about its settable properties and their current values. Parameters ---------- artist : `~matplotlib.artist.Artist` or an iterable of `Artist`\s Returns ------- str The settable properties of *artist*, as plain text if :rc:`docstring.hardcopy` is False and as a rst table (intended for use in Sphinx) if it is True.
Here is the function:
def kwdoc(artist):
    r"""
    Inspect an `~matplotlib.artist.Artist` class (using `.ArtistInspector`)
    and return information about its settable properties and their current
    values.

    Parameters
    ----------
    artist : `~matplotlib.artist.Artist` or an iterable of `Artist`\s

    Returns
    -------
    str
        The settable properties of *artist*, as plain text if
        :rc:`docstring.hardcopy` is False and as a rst table (intended for
        use in Sphinx) if it is True.
    """
    insp = ArtistInspector(artist)
    if mpl.rcParams['docstring.hardcopy']:
        # Sphinx build: emit a reST table.
        return '\n'.join(insp.pprint_setters_rest(leadingspace=4))
    # Interactive use: plain-text listing.
    return 'Properties:\n' + '\n'.join(insp.pprint_setters(leadingspace=4))
170,978 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
_epoch = None
The provided code snippet includes necessary dependencies for implementing the `_reset_epoch_test_example` function. Write a Python function `def _reset_epoch_test_example()` to solve the following problem:
Reset the Matplotlib date epoch so it can be set again. Only for use in tests and examples.
Here is the function:
def _reset_epoch_test_example():
"""
Reset the Matplotlib date epoch so it can be set again.
Only for use in tests and examples.
"""
global _epoch
_epoch = None | Reset the Matplotlib date epoch so it can be set again. Only for use in tests and examples. |
170,979 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
_epoch = None
The provided code snippet includes necessary dependencies for implementing the `set_epoch` function. Write a Python function `def set_epoch(epoch)` to solve the following problem:
Set the epoch (origin for dates) for datetime calculations. The default epoch is :rc:`dates.epoch` (by default 1970-01-01T00:00). If microsecond accuracy is desired, the date being plotted needs to be within approximately 70 years of the epoch. Matplotlib internally represents dates as days since the epoch, so floating point dynamic range needs to be within a factor of 2^52. `~.dates.set_epoch` must be called before any dates are converted (i.e. near the import section) or a RuntimeError will be raised. See also :doc:`/gallery/ticks/date_precision_and_epochs`. Parameters ---------- epoch : str valid UTC date parsable by `numpy.datetime64` (do not include timezone).
Here is the function:
def set_epoch(epoch):
    """
    Set the epoch (origin for dates) for datetime calculations.

    The default epoch is :rc:`dates.epoch` (by default 1970-01-01T00:00).

    If microsecond accuracy is desired, the date being plotted needs to be
    within approximately 70 years of the epoch.  Matplotlib internally
    represents dates as days since the epoch, so floating point dynamic
    range needs to be within a factor of 2^52.

    `~.dates.set_epoch` must be called before any dates are converted
    (i.e. near the import section) or a RuntimeError will be raised.

    See also :doc:`/gallery/ticks/date_precision_and_epochs`.

    Parameters
    ----------
    epoch : str
        valid UTC date parsable by `numpy.datetime64` (do not include
        timezone).
    """
    global _epoch
    if _epoch is None:
        _epoch = epoch
    else:
        # A date has already been converted with the old epoch; changing
        # it now would silently corrupt existing ordinals.
        raise RuntimeError('set_epoch must be called before dates plotted.')
170,980 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
def _get_tzinfo(tz=None):
"""
Generate `~datetime.tzinfo` from a string or return `~datetime.tzinfo`.
If None, retrieve the preferred timezone from the rcParams dictionary.
"""
if tz is None:
tz = mpl.rcParams['timezone']
if tz == 'UTC':
return UTC
if isinstance(tz, str):
tzinfo = dateutil.tz.gettz(tz)
if tzinfo is None:
raise ValueError(f"{tz} is not a valid timezone as parsed by"
" dateutil.tz.gettz.")
return tzinfo
if isinstance(tz, datetime.tzinfo):
return tz
raise TypeError("tz must be string or tzinfo subclass.")
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
def get_epoch():
    """
    Get the epoch used by `.dates`.

    Returns
    -------
    epoch : str
        String for the epoch (parsable by `numpy.datetime64`).
    """
    global _epoch
    if _epoch is None:
        # Lazily pick up the configured default the first time it is needed.
        _epoch = mpl.rcParams['date.epoch']
    return _epoch
The provided code snippet includes necessary dependencies for implementing the `_from_ordinalf` function. Write a Python function `def _from_ordinalf(x, tz=None)` to solve the following problem:
Convert Gregorian float of the date, preserving hours, minutes, seconds and microseconds. Return value is a `.datetime`. The input date *x* is a float in ordinal days at UTC, and the output will be the specified `.datetime` object corresponding to that time in timezone *tz*, or if *tz* is ``None``, in the timezone specified in :rc:`timezone`.
Here is the function:
def _from_ordinalf(x, tz=None):
    """
    Convert Gregorian float of the date, preserving hours, minutes,
    seconds and microseconds.  Return value is a `.datetime`.

    The input date *x* is a float in ordinal days at UTC, and the output
    will be the specified `.datetime` object corresponding to that time in
    timezone *tz*, or if *tz* is ``None``, in the timezone specified in
    :rc:`timezone`.
    """
    tz = _get_tzinfo(tz)
    # Days-since-epoch -> absolute time, rounded to whole microseconds.
    dt = (np.datetime64(get_epoch()) +
          np.timedelta64(int(np.round(x * MUSECONDS_PER_DAY)), 'us'))
    if dt < np.datetime64('0001-01-01') or dt >= np.datetime64('10000-01-01'):
        raise ValueError(f'Date ordinal {x} converts to {dt} (using '
                         f'epoch {get_epoch()}), but Matplotlib dates must be '
                         'between year 0001 and 9999.')
    # convert from datetime64 to datetime:
    dt = dt.tolist()
    # datetime64 is always UTC:
    dt = dt.replace(tzinfo=dateutil.tz.gettz('UTC'))
    # but maybe we are working in a different timezone so move.
    dt = dt.astimezone(tz)
    # fix round off errors
    if np.abs(x) > 70 * 365:
        # if x is big, round off to nearest twenty microseconds.
        # This avoids floating point roundoff error
        ms = round(dt.microsecond / 20) * 20
        if ms == 1000000:
            # Rounding carried over into the next whole second.
            dt = dt.replace(microsecond=0) + datetime.timedelta(seconds=1)
        else:
            dt = dt.replace(microsecond=ms)
    return dt
170,981 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def date2num(d):
    """
    Convert datetime objects to Matplotlib dates.

    Parameters
    ----------
    d : `datetime.datetime` or `numpy.datetime64` or sequences of these

    Returns
    -------
    float or sequence of floats
        Number of days since the epoch.  See `.get_epoch` for the
        epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`.  If
        the epoch is "1970-01-01T00:00:00" (default) then noon Jan 1 1970
        ("1970-01-01T12:00:00") returns 0.5.

    Notes
    -----
    The Gregorian calendar is assumed; this is not universal practice.
    For details see the module docstring.
    """
    # Unpack in case of e.g. Pandas or xarray object
    d = cbook._unpack_to_numpy(d)
    # make an iterable, but save state to unpack later:
    iterable = np.iterable(d)
    if not iterable:
        d = [d]
    # Preserve any mask so it can be re-applied after conversion.
    masked = np.ma.is_masked(d)
    mask = np.ma.getmask(d)
    d = np.asarray(d)
    # convert to datetime64 arrays, if not already:
    if not np.issubdtype(d.dtype, np.datetime64):
        # datetime arrays
        if not d.size:
            # deals with an empty array...
            return d
        # NOTE(review): only d[0] is inspected, so a homogeneous sequence
        # (all tz-aware or all naive) is assumed -- confirm with callers.
        tzi = getattr(d[0], 'tzinfo', None)
        if tzi is not None:
            # make datetime naive:
            d = [dt.astimezone(UTC).replace(tzinfo=None) for dt in d]
            d = np.asarray(d)
        d = d.astype('datetime64[us]')
    d = np.ma.masked_array(d, mask=mask) if masked else d
    d = _dt64_to_ordinalf(d)
    # Unwrap scalar inputs back to a scalar result.
    return d if iterable else d[0]
The provided code snippet includes necessary dependencies for implementing the `datestr2num` function. Write a Python function `def datestr2num(d, default=None)` to solve the following problem:
Convert a date string to a datenum using `dateutil.parser.parse`. Parameters ---------- d : str or sequence of str The dates to convert. default : datetime.datetime, optional The default date to use when fields are missing in *d*.
Here is the function:
def datestr2num(d, default=None):
    """
    Convert a date string to a datenum using `dateutil.parser.parse`.

    Parameters
    ----------
    d : str or sequence of str
        The dates to convert.
    default : datetime.datetime, optional
        The default date to use when fields are missing in *d*.
    """
    if isinstance(d, str):
        return date2num(dateutil.parser.parse(d, default=default))
    if default is not None:
        # Per-element parsing so the default can be applied to each entry.
        converted = [date2num(dateutil.parser.parse(s, default=default))
                     for s in d]
        return np.asarray(converted)
    arr = np.asarray(d)
    if not arr.size:
        return arr
    # Fast path: vectorized parse without defaults.
    return date2num(_dateutil_parser_parse_np_vectorized(arr))
170,982 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
class __getattr__:
    # NOTE(review): in the upstream module this class is wrapped by
    # ``@_api.caching_module_getattr`` so that ``__getattr__("name")``
    # resolves deprecated module-level attributes -- confirm the decorator
    # is present in the full source.
    # Deprecated module-level constant, kept accessible with a warning.
    JULIAN_OFFSET = _api.deprecated("3.7")(property(lambda self: 1721424.5))
    # Julian date at 0000-12-31
    # note that the Julian day epoch is achievable w/
    # np.datetime64('-4713-11-24T12:00:00'); datetime64 is proleptic
    # Gregorian and BC has a one-year offset.  So
    # np.datetime64('0000-12-31') - np.datetime64('-4713-11-24T12:00') =
    # 1721424.5
    # Ref: https://en.wikipedia.org/wiki/Julian_day
def get_epoch():
    """
    Get the epoch used by `.dates`.

    Returns
    -------
    epoch : str
        String for the epoch (parsable by `numpy.datetime64`).
    """
    global _epoch
    if _epoch is None:
        # Lazily pick up the configured default the first time it is needed.
        _epoch = mpl.rcParams['date.epoch']
    return _epoch
The provided code snippet includes necessary dependencies for implementing the `julian2num` function. Write a Python function `def julian2num(j)` to solve the following problem:
Convert a Julian date (or sequence) to a Matplotlib date (or sequence). Parameters ---------- j : float or sequence of floats Julian dates (days relative to 4713 BC Jan 1, 12:00:00 Julian calendar or 4714 BC Nov 24, 12:00:00, proleptic Gregorian calendar). Returns ------- float or sequence of floats Matplotlib dates (days relative to `.get_epoch`).
Here is the function:
def julian2num(j):
    """
    Convert a Julian date (or sequence) to a Matplotlib date (or sequence).

    Parameters
    ----------
    j : float or sequence of floats
        Julian dates (days relative to 4713 BC Jan 1, 12:00:00 Julian
        calendar or 4714 BC Nov 24, 12:00:00, proleptic Gregorian calendar).

    Returns
    -------
    float or sequence of floats
        Matplotlib dates (days relative to `.get_epoch`).
    """
    # Fractional days since the configured epoch and since the 0000-12-31
    # reference that JULIAN_OFFSET is defined against.
    ep = np.datetime64(get_epoch(), 'h').astype(float) / 24.
    ep0 = np.datetime64('0000-12-31T00:00:00', 'h').astype(float) / 24.
    # Julian offset defined above is relative to 0000-12-31, but we need
    # relative to our current epoch:
    dt = __getattr__("JULIAN_OFFSET") - ep0 + ep
    return np.subtract(j, dt)  # Handles both scalar & nonscalar j.
170,983 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
class __getattr__:
    # NOTE(review): in the upstream module this class is wrapped by
    # ``@_api.caching_module_getattr`` so that ``__getattr__("name")``
    # resolves deprecated module-level attributes -- confirm the decorator
    # is present in the full source.
    # Deprecated module-level constant, kept accessible with a warning.
    JULIAN_OFFSET = _api.deprecated("3.7")(property(lambda self: 1721424.5))
    # Julian date at 0000-12-31
    # note that the Julian day epoch is achievable w/
    # np.datetime64('-4713-11-24T12:00:00'); datetime64 is proleptic
    # Gregorian and BC has a one-year offset.  So
    # np.datetime64('0000-12-31') - np.datetime64('-4713-11-24T12:00') =
    # 1721424.5
    # Ref: https://en.wikipedia.org/wiki/Julian_day
def get_epoch():
    """
    Get the epoch used by `.dates`.

    Returns
    -------
    epoch : str
        String for the epoch (parsable by `numpy.datetime64`).
    """
    global _epoch
    if _epoch is None:
        # Lazily pick up the configured default the first time it is needed.
        _epoch = mpl.rcParams['date.epoch']
    return _epoch
The provided code snippet includes necessary dependencies for implementing the `num2julian` function. Write a Python function `def num2julian(n)` to solve the following problem:
Convert a Matplotlib date (or sequence) to a Julian date (or sequence). Parameters ---------- n : float or sequence of floats Matplotlib dates (days relative to `.get_epoch`). Returns ------- float or sequence of floats Julian dates (days relative to 4713 BC Jan 1, 12:00:00).
Here is the function:
def num2julian(n):
    """
    Convert a Matplotlib date (or sequence) to a Julian date (or sequence).

    Parameters
    ----------
    n : float or sequence of floats
        Matplotlib dates (days relative to `.get_epoch`).

    Returns
    -------
    float or sequence of floats
        Julian dates (days relative to 4713 BC Jan 1, 12:00:00).
    """
    epoch_days = np.datetime64(get_epoch(), 'h').astype(float) / 24.
    ref_days = np.datetime64('0000-12-31T00:00:00', 'h').astype(float) / 24.
    # JULIAN_OFFSET is defined relative to 0000-12-31; shift it so it is
    # relative to the currently configured epoch.
    offset = __getattr__("JULIAN_OFFSET") - ref_days + epoch_days
    # np.add copes with scalar as well as array-like n.
    return np.add(n, offset)
170,984 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
_ordinalf_to_timedelta_np_vectorized = np.vectorize(
lambda x: datetime.timedelta(days=x), otypes="O")
The provided code snippet includes necessary dependencies for implementing the `num2timedelta` function. Write a Python function `def num2timedelta(x)` to solve the following problem:
Convert number of days to a `~datetime.timedelta` object. If *x* is a sequence, a sequence of `~datetime.timedelta` objects will be returned. Parameters ---------- x : float, sequence of floats Number of days. The fraction part represents hours, minutes, seconds. Returns ------- `datetime.timedelta` or list[`datetime.timedelta`]
Here is the function:
def num2timedelta(x):
    """
    Convert number of days to a `~datetime.timedelta` object.

    If *x* is a sequence, a sequence of `~datetime.timedelta` objects will
    be returned.

    Parameters
    ----------
    x : float, sequence of floats
        Number of days.  The fraction part represents hours, minutes,
        seconds.

    Returns
    -------
    `datetime.timedelta` or list[`datetime.timedelta`]
    """
    # The vectorized helper builds one timedelta per element; ``.tolist()``
    # unwraps a 0-d result back into a plain timedelta.
    return _ordinalf_to_timedelta_np_vectorized(x).tolist()
170,985 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
def date2num(d):
    """
    Convert datetime objects to Matplotlib dates.

    Parameters
    ----------
    d : `datetime.datetime` or `numpy.datetime64` or sequences of these

    Returns
    -------
    float or sequence of floats
        Number of days since the epoch.  See `.get_epoch` for the
        epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`.  If
        the epoch is "1970-01-01T00:00:00" (default) then noon Jan 1 1970
        ("1970-01-01T12:00:00") returns 0.5.

    Notes
    -----
    The Gregorian calendar is assumed; this is not universal practice.
    For details see the module docstring.
    """
    # Unpack in case of e.g. Pandas or xarray object
    d = cbook._unpack_to_numpy(d)
    # make an iterable, but save state to unpack later:
    iterable = np.iterable(d)
    if not iterable:
        d = [d]
    # Preserve any mask so it can be re-applied after conversion.
    masked = np.ma.is_masked(d)
    mask = np.ma.getmask(d)
    d = np.asarray(d)
    # convert to datetime64 arrays, if not already:
    if not np.issubdtype(d.dtype, np.datetime64):
        # datetime arrays
        if not d.size:
            # deals with an empty array...
            return d
        # NOTE(review): only d[0] is inspected, so a homogeneous sequence
        # (all tz-aware or all naive) is assumed -- confirm with callers.
        tzi = getattr(d[0], 'tzinfo', None)
        if tzi is not None:
            # make datetime naive:
            d = [dt.astimezone(UTC).replace(tzinfo=None) for dt in d]
            d = np.asarray(d)
        d = d.astype('datetime64[us]')
    d = np.ma.masked_array(d, mask=mask) if masked else d
    d = _dt64_to_ordinalf(d)
    # Unwrap scalar inputs back to a scalar result.
    return d if iterable else d[0]
The provided code snippet includes necessary dependencies for implementing the `drange` function. Write a Python function `def drange(dstart, dend, delta)` to solve the following problem:
Return a sequence of equally spaced Matplotlib dates. The dates start at *dstart* and reach up to, but not including *dend*. They are spaced by *delta*. Parameters ---------- dstart, dend : `~datetime.datetime` The date limits. delta : `datetime.timedelta` Spacing of the dates. Returns ------- `numpy.array` A list of floats representing Matplotlib dates.
Here is the function:
def drange(dstart, dend, delta):
    """
    Return a sequence of equally spaced Matplotlib dates.

    The dates start at *dstart* and reach up to, but not including, *dend*;
    consecutive values are *delta* apart.

    Parameters
    ----------
    dstart, dend : `~datetime.datetime`
        The date limits.
    delta : `datetime.timedelta`
        Spacing of the dates.

    Returns
    -------
    `numpy.array`
        A list of floats representing Matplotlib dates.
    """
    start = date2num(dstart)
    stop = date2num(dend)
    step = delta.total_seconds() / SEC_PER_DAY
    # Number of delta-sized steps needed to span [dstart, dend).
    count = int(np.ceil((stop - start) / step))
    # Date on which the generated interval would end.
    last = dstart + count * delta
    if last >= dend:
        # Keep the interval half-open: drop the endpoint that reached dend.
        last -= delta
        count -= 1
    stop = date2num(last)  # float endpoint of the clipped interval
    return np.linspace(start, stop, count + 1)
170,986 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
def _wrap_in_tex(text):
p = r'([a-zA-Z]+)'
ret_text = re.sub(p, r'}$\1$\\mathdefault{', text)
# Braces ensure symbols are not spaced like binary operators.
ret_text = ret_text.replace('-', '{-}').replace(':', '{:}')
# To not concatenate space between numbers.
ret_text = ret_text.replace(' ', r'\;')
ret_text = '$\\mathdefault{' + ret_text + '}$'
ret_text = ret_text.replace('$\\mathdefault{}$', '')
return ret_text | null |
170,987 | import datetime
import functools
import logging
import math
import re
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import dateutil.tz
import numpy as np
import matplotlib as mpl
from matplotlib import _api, cbook, ticker, units
HOURS_PER_DAY = 24.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
class DateFormatter(ticker.Formatter):
    """
    Format a tick (in days since the epoch) with a
    `~datetime.datetime.strftime` format string.
    """

    def __init__(self, fmt, tz=None, *, usetex=None):
        """
        Parameters
        ----------
        fmt : str
            `~datetime.datetime.strftime` format string
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        usetex : bool, default: :rc:`text.usetex`
            To enable/disable the use of TeX's math mode for rendering the
            results of the formatter.
        """
        self.tz = _get_tzinfo(tz)
        self.fmt = fmt
        if usetex is None:
            usetex = mpl.rcParams['text.usetex']
        self._usetex = usetex

    def __call__(self, x, pos=0):
        # Convert the tick value to a datetime in this formatter's timezone
        # and render it with the stored strftime format string.
        formatted = num2date(x, self.tz).strftime(self.fmt)
        if self._usetex:
            return _wrap_in_tex(formatted)
        return formatted

    def set_tzinfo(self, tz):
        """Set the formatter's timezone (a string or `~datetime.tzinfo`)."""
        self.tz = _get_tzinfo(tz)
class YearLocator(RRuleLocator):
    """
    Make ticks on a given day of each year that is a multiple of base.

    Examples::

      # Tick every year on Jan 1st
      locator = YearLocator()

      # Tick every 5 years on July 4th
      locator = YearLocator(5, month=7, day=4)
    """

    def __init__(self, base=1, month=1, day=1, tz=None):
        """
        Parameters
        ----------
        base : int, default: 1
            Mark ticks every *base* years.
        month : int, default: 1
            The month on which to place the ticks, starting from 1. Default is
            January.
        day : int, default: 1
            The day on which to place the ticks.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        rule = rrulewrapper(YEARLY, interval=base, bymonth=month,
                            bymonthday=day, **self.hms0d)
        super().__init__(rule, tz=tz)
        # Helper used to snap view limits to multiples of *base*.
        self.base = ticker._Edge_integer(base, 0)

    def _create_rrule(self, vmin, vmax):
        # 'start' needs to be a multiple of the interval to create ticks on
        # interval multiples when the tick frequency is YEARLY
        # Clamp to datetime's valid year range (1..9999) while snapping
        # vmin down / vmax up to the nearest multiple of the base step.
        ymin = max(self.base.le(vmin.year) * self.base.step, 1)
        ymax = min(self.base.ge(vmax.year) * self.base.step, 9999)
        # Pull the month/day the rule was constructed with (defaulting to
        # Jan 1) so 'start' lands exactly on a tick.
        c = self.rule._construct
        replace = {'year': ymin,
                   'month': c.get('bymonth', 1),
                   'day': c.get('bymonthday', 1),
                   'hour': 0, 'minute': 0, 'second': 0}
        start = vmin.replace(**replace)
        stop = start.replace(year=ymax)
        self.rule.set(dtstart=start, until=stop)
        return start, stop
class MonthLocator(RRuleLocator):
    """
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
    """

    def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
        """
        Parameters
        ----------
        bymonth : int or list of int, default: all months
            Ticks will be placed on every month in *bymonth*. Default is
            ``range(1, 13)``, i.e. every month.
        bymonthday : int, default: 1
            The day on which to place the ticks.
        interval : int, default: 1
            The interval between each iteration. For example, if
            ``interval=2``, mark every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        # Default to every month of the year.
        months = range(1, 13) if bymonth is None else bymonth
        rule = rrulewrapper(MONTHLY, bymonth=months, bymonthday=bymonthday,
                            interval=interval, **self.hms0d)
        super().__init__(rule, tz=tz)
class WeekdayLocator(RRuleLocator):
    """
    Make ticks on occurrences of each weekday.
    """

    def __init__(self, byweekday=1, interval=1, tz=None):
        """
        Parameters
        ----------
        byweekday : int or list of int, default: all days
            Ticks will be placed on every weekday in *byweekday*. Default is
            every day.

            Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
            SU, the constants from :mod:`dateutil.rrule`, which have been
            imported into the :mod:`matplotlib.dates` namespace.
        interval : int, default: 1
            The interval between each iteration. For example, if
            ``interval=2``, mark every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        day_rule = rrulewrapper(
            DAILY, byweekday=byweekday, interval=interval, **self.hms0d)
        super().__init__(day_rule, tz=tz)
class DayLocator(RRuleLocator):
    """
    Make ticks on occurrences of each day of the month. For example,
    1, 15, 30.
    """

    def __init__(self, bymonthday=None, interval=1, tz=None):
        """
        Parameters
        ----------
        bymonthday : int or list of int, default: all days
            Ticks will be placed on every day in *bymonthday*. Default is
            ``bymonthday=range(1, 32)``, i.e., every day of the month.
        interval : int, default: 1
            The interval between each iteration. For example, if
            ``interval=2``, mark every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        # Reject non-integral or non-positive intervals up front.
        if interval != int(interval) or interval < 1:
            raise ValueError("interval must be an integer greater than 0")
        days = range(1, 32) if bymonthday is None else bymonthday
        rule = rrulewrapper(DAILY, bymonthday=days,
                            interval=interval, **self.hms0d)
        super().__init__(rule, tz=tz)
class HourLocator(RRuleLocator):
    """
    Make ticks on occurrences of each hour.
    """

    def __init__(self, byhour=None, interval=1, tz=None):
        """
        Parameters
        ----------
        byhour : int or list of int, default: all hours
            Ticks will be placed on every hour in *byhour*. Default is
            ``byhour=range(24)``, i.e., every hour.
        interval : int, default: 1
            The interval between each iteration. For example, if
            ``interval=2``, mark every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        hours = range(24) if byhour is None else byhour
        # Pin minutes/seconds to zero so ticks fall exactly on the hour.
        rule = rrulewrapper(HOURLY, byhour=hours, interval=interval,
                            byminute=0, bysecond=0)
        super().__init__(rule, tz=tz)
class MinuteLocator(RRuleLocator):
    """
    Make ticks on occurrences of each minute.
    """

    def __init__(self, byminute=None, interval=1, tz=None):
        """
        Parameters
        ----------
        byminute : int or list of int, default: all minutes
            Ticks will be placed on every minute in *byminute*. Default is
            ``byminute=range(60)``, i.e., every minute.
        interval : int, default: 1
            The interval between each iteration. For example, if
            ``interval=2``, mark every second occurrence.
        tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
            Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
        """
        minutes = range(60) if byminute is None else byminute
        # Pin seconds to zero so ticks fall exactly on the minute.
        rule = rrulewrapper(MINUTELY, byminute=minutes, interval=interval,
                            bysecond=0)
        super().__init__(rule, tz=tz)
The provided code snippet includes necessary dependencies for implementing the `date_ticker_factory` function. Write a Python function `def date_ticker_factory(span, tz=None, numticks=5)` to solve the following problem:
Create a date locator with *numticks* (approx) and a date formatter for *span* in days. Return value is (locator, formatter).
Here is the function:
def date_ticker_factory(span, tz=None, numticks=5):
    """
    Create a date locator with *numticks* (approx) and a date formatter
    for *span* in days.  Return value is (locator, formatter).
    """
    if span == 0:
        # Zero-width span: fall back to a one-hour window.
        span = 1 / HOURS_PER_DAY

    mins = span * MINUTES_PER_DAY
    hrs = span * HOURS_PER_DAY
    days = span
    wks = span / DAYS_PER_WEEK
    months = span / DAYS_PER_MONTH      # Approx
    years = span / DAYS_PER_YEAR        # Approx

    # Candidate (count, locator factory, strftime format) triples, from the
    # coarsest unit to the finest; the first unit with more than *numticks*
    # occurrences in the span wins.
    candidates = (
        (years, lambda: YearLocator(int(years / numticks), tz=tz), '%Y'),
        (months, lambda: MonthLocator(tz=tz), '%b %Y'),
        (wks, lambda: WeekdayLocator(tz=tz), '%a, %b %d'),
        (days, lambda: DayLocator(interval=math.ceil(days / numticks), tz=tz),
         '%b %d'),
        (hrs, lambda: HourLocator(interval=math.ceil(hrs / numticks), tz=tz),
         '%H:%M\n%b %d'),
        (mins, lambda: MinuteLocator(interval=math.ceil(mins / numticks),
                                     tz=tz),
         '%H:%M:%S'),
    )
    for count, make_locator, fmt in candidates:
        if count > numticks:
            locator = make_locator()
            break
    else:
        # Sub-minute span: default to minute ticks.
        locator = MinuteLocator(tz=tz)
        fmt = '%H:%M:%S'

    return locator, DateFormatter(fmt, tz=tz)
170,988 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_any(s):
    """Accept any value unchanged (no-op validator)."""
    return s
170,989 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def _validate_date(s):
try:
np.datetime64(s)
return s
except ValueError:
raise ValueError(
f'{s!r} should be a string that can be parsed by numpy.datetime64') | null |
170,990 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_bool(b):
    """Convert *b* to `bool` or raise `ValueError`."""
    if isinstance(b, str):
        b = b.lower()
    # Tuples (not sets) so unhashable inputs still compare by equality.
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    if b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    raise ValueError(f'Cannot convert {b!r} to bool')
def validate_axisbelow(s):
    """Validate an axisbelow setting: a bool-like value or the string 'line'."""
    try:
        return validate_bool(s)
    except ValueError:
        # Not bool-like; the only other legal value is the string 'line'.
        if isinstance(s, str) and s == 'line':
            return 'line'
        raise ValueError(f'{s!r} cannot be interpreted as'
                         ' True, False, or "line"')
170,991 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
The provided code snippet includes necessary dependencies for implementing the `validate_dpi` function. Write a Python function `def validate_dpi(s)` to solve the following problem:
Confirm s is string 'figure' or convert s to float or raise.
Here is the function:
def validate_dpi(s):
    """Confirm s is string 'figure' or convert s to float or raise."""
    if s == 'figure':
        # 'figure' means "inherit the figure's dpi"; pass it through as-is.
        return s
    try:
        dpi = float(s)
    except ValueError as e:
        raise ValueError(f'{s!r} is not string "figure" and '
                         f'could not convert {s!r} to float') from e
    return dpi
170,992 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
The provided code snippet includes necessary dependencies for implementing the `_make_type_validator` function. Write a Python function `def _make_type_validator(cls, *, allow_none=False)` to solve the following problem:
Return a validator that converts inputs to *cls* or raises (and possibly allows ``None`` as well).
Here is the function:
def _make_type_validator(cls, *, allow_none=False):
"""
Return a validator that converts inputs to *cls* or raises (and possibly
allows ``None`` as well).
"""
def validator(s):
if (allow_none and
(s is None or isinstance(s, str) and s.lower() == "none")):
return None
if cls is str and not isinstance(s, str):
raise ValueError(f'Could not convert {s!r} to str')
try:
return cls(s)
except (TypeError, ValueError) as e:
raise ValueError(
f'Could not convert {s!r} to {cls.__name__}') from e
validator.__name__ = f"validate_{cls.__name__}"
if allow_none:
validator.__name__ += "_or_None"
validator.__qualname__ = (
validator.__qualname__.rsplit(".", 1)[0] + "." + validator.__name__)
return validator | Return a validator that converts inputs to *cls* or raises (and possibly allows ``None`` as well). |
170,993 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
validate_string = _make_type_validator(str)
import os
)
if os.environ.get('MPLBACKEND'):
rcParams['backend'] = os.environ.get('MPLBACKEND')
def _validate_pathlike(s):
if isinstance(s, (str, os.PathLike)):
# Store value as str because savefig.directory needs to distinguish
# between "" (cwd) and "." (cwd, but gets updated by user selections).
return os.fsdecode(s)
else:
return validate_string(s) | null |
170,994 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
validate_int = _make_type_validator(int)
The provided code snippet includes necessary dependencies for implementing the `validate_fonttype` function. Write a Python function `def validate_fonttype(s)` to solve the following problem:
Confirm that this is a Postscript or PDF font type that we know how to convert to.
Here is the function:
def validate_fonttype(s):
    """
    Confirm that this is a Postscript or PDF font type that we know how to
    convert to.
    """
    fonttypes = {'type3': 3,
                 'truetype': 42}
    try:
        fonttype = validate_int(s)
    except ValueError:
        # Not an int: accept the symbolic names (case-insensitively).
        try:
            return fonttypes[s.lower()]
        except KeyError as e:
            raise ValueError('Supported Postscript/PDF font types are %s'
                             % list(fonttypes)) from e
    # Integer input: it must be one of the known numeric font types.
    if fonttype not in fonttypes.values():
        raise ValueError(
            'Supported Postscript/PDF font types are %s'
            % list(fonttypes.values()))
    return fonttype
170,995 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
_validate_standard_backends = ValidateInStrings(
'backend', all_backends, ignorecase=True)
_auto_backend_sentinel = object()
def validate_backend(s):
    """Validate a backend setting against the list of standard backends."""
    if s is _auto_backend_sentinel or s.startswith("module://"):
        # The auto-sentinel and explicit module paths bypass the
        # known-backend name check.
        return s
    return _validate_standard_backends(s)
170,996 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
class ValidateInStrings:
    """Callable that validates membership in a fixed set of strings."""

    def __init__(self, key, valid, ignorecase=False, *,
                 _deprecated_since=None):
        """*valid* is a list of legal strings."""
        self.key = key
        self.ignorecase = ignorecase
        self._deprecated_since = _deprecated_since

        def func(s):
            if ignorecase:
                return s.lower()
            else:
                return s
        # Map normalized (possibly lowercased) form -> canonical spelling.
        self.valid = {func(k): k for k in valid}

    def __call__(self, s):
        if self._deprecated_since:
            # Look the instance up by identity in module globals to recover
            # its public name for the deprecation warning; assumes the
            # instance is bound at module level.
            name, = (k for k, v in globals().items() if v is self)
            _api.warn_deprecated(
                self._deprecated_since, name=name, obj_type="function")
        if self.ignorecase and isinstance(s, str):
            s = s.lower()
        if s in self.valid:
            # Return the canonical spelling, not the user's input.
            return self.valid[s]
        msg = (f"{s!r} is not a valid value for {self.key}; supported values "
               f"are {[*self.valid.values()]}")
        # Common user error: quoting the value inside the rc file.
        if (isinstance(s, str)
                and (s.startswith('"') and s.endswith('"')
                     or s.startswith("'") and s.endswith("'"))
                and s[1:-1] in self.valid):
            msg += "; remove quotes surrounding your string"
        raise ValueError(msg)
def _validate_toolbar(s):
    """Validate the toolbar setting, warning that 'toolmanager' is experimental."""
    check = ValidateInStrings(
        'toolbar', ['None', 'toolbar2', 'toolmanager'], ignorecase=True)
    choice = check(s)
    if choice == 'toolmanager':
        _api.warn_external(
            "Treat the new Tool classes introduced in v1.5 as experimental "
            "for now; the API and rcParam may change in future versions.")
    return choice
170,997 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_color(s):
    """Return a valid color arg."""
    if isinstance(s, str):
        if s.lower() == 'none':
            return 'none'
        if len(s) in (6, 8):
            # Hex digits without the leading '#' (as written in matplotlibrc).
            hex_candidate = '#' + s
            if is_color_like(hex_candidate):
                return hex_candidate
    if is_color_like(s):
        return s
    # If it is still valid, it must be a tuple (as a string from matplotlibrc).
    try:
        literal = ast.literal_eval(s)
    except (SyntaxError, ValueError):
        pass
    else:
        if is_color_like(literal):
            return literal
    raise ValueError(f'{s!r} does not look like a color arg')
The provided code snippet includes necessary dependencies for implementing the `validate_color_or_inherit` function. Write a Python function `def validate_color_or_inherit(s)` to solve the following problem:
Return a valid color arg.
Here is the function:
def validate_color_or_inherit(s):
    """Return a valid color arg, additionally allowing the string 'inherit'."""
    return s if cbook._str_equal(s, 'inherit') else validate_color(s)
170,998 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_color(s):
    """Return a valid color arg."""
    if isinstance(s, str):
        if s.lower() == 'none':
            return 'none'
        if len(s) in (6, 8):
            # Bare hex digits (matplotlibrc style): try with a '#' prefix.
            prefixed = '#' + s
            if is_color_like(prefixed):
                return prefixed
    if is_color_like(s):
        return s
    # If it is still valid, it must be a tuple (as a string from matplotlibrc).
    try:
        parsed = ast.literal_eval(s)
    except (SyntaxError, ValueError):
        pass
    else:
        if is_color_like(parsed):
            return parsed
    raise ValueError(f'{s!r} does not look like a color arg')
def validate_color_or_auto(s):
    """Return a valid color arg, additionally allowing the string 'auto'."""
    return s if cbook._str_equal(s, 'auto') else validate_color(s)
170,999 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_color(s):
def validate_color_for_prop_cycle(s):
    """Return a valid color arg, rejecting 'CN' color-cycle references."""
    # An N-th color-cycle reference can't itself go into the color cycle.
    is_cycle_ref = isinstance(s, str) and re.match("^C[0-9]$", s) is not None
    if is_cycle_ref:
        raise ValueError(f"Cannot put cycle reference ({s!r}) in prop_cycler")
    return validate_color(s)
171,000 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def is_color_like(c):
    """Return whether *c* can be interpreted as an RGB(A) color."""
    # Special-case nth color syntax because it cannot be parsed during setup.
    if _is_nth_color(c):
        return True
    try:
        to_rgba(c)
        return True
    except ValueError:
        return False
def _validate_color_or_linecolor(s):
    """
    Validate a color-ish setting that may also be one of the special strings
    'linecolor', 'mfc'/'markerfacecolor', or 'mec'/'markeredgecolor', or None.

    Returns the canonical spelling for the special strings, None for
    None/'none', a '#'-prefixed hex string for bare 6/8-digit hex input, or
    the color arg itself; raises ValueError otherwise.
    """
    if cbook._str_equal(s, 'linecolor'):
        return s
    elif cbook._str_equal(s, 'mfc') or cbook._str_equal(s, 'markerfacecolor'):
        return 'markerfacecolor'
    elif cbook._str_equal(s, 'mec') or cbook._str_equal(s, 'markeredgecolor'):
        return 'markeredgecolor'
    elif s is None:
        return None
    # Bug fix: this previously read ``isinstance(s, str) and len(s) == 6 or
    # len(s) == 8`` which, by operator precedence, sent *any* length-8
    # non-string (e.g. an 8-tuple) into the hex branch and crashed on
    # ``'#' + s``.  Parenthesize so the hex branch is strings-only.
    elif isinstance(s, str) and len(s) in (6, 8):
        stmp = '#' + s
        if is_color_like(stmp):
            return stmp
        if s.lower() == 'none':
            return None
    elif is_color_like(s):
        return s
    raise ValueError(f'{s!r} does not look like a color arg')
171,001 | import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
class Colormap:
"""
Baseclass for all scalar to RGBA mappings.
Typically, Colormap instances are used to convert data values (floats)
from the interval ``[0, 1]`` to the RGBA color that the respective
Colormap represents. For scaling of data into the ``[0, 1]`` interval see
`matplotlib.colors.Normalize`. Subclasses of `matplotlib.cm.ScalarMappable`
make heavy use of this ``data -> normalize -> map-to-color`` processing
chain.
"""
def __init__(self, name, N=256):
"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of RGB quantization levels.
"""
self.name = name
self.N = int(N) # ensure that N is always int
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = self.N
self._i_over = self.N + 1
self._i_bad = self.N + 2
self._isinit = False
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: `matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
"""
Parameters
----------
X : float or int, `~numpy.ndarray` or scalar
The data value(s) to convert to RGBA.
For floats, *X* should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, *X* should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float or array-like or None
Alpha must be a scalar between 0 and 1, a sequence of such
floats with shape matching X, or None.
bytes : bool
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be uint8s in the interval
``[0, 255]``.
Returns
-------
Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
if not self._isinit:
self._init()
# Take the bad mask from a masked array, or in all other cases defer
# np.isnan() to after we have converted to an array.
mask_bad = X.mask if np.ma.is_masked(X) else None
xa = np.array(X, copy=True)
if mask_bad is None:
mask_bad = np.isnan(xa)
if not xa.dtype.isnative:
xa = xa.byteswap().newbyteorder() # Native byteorder is faster.
if xa.dtype.kind == "f":
xa *= self.N
# Negative values are out of range, but astype(int) would
# truncate them towards zero.
xa[xa < 0] = -1
# xa == 1 (== N after multiplication) is not out of range.
xa[xa == self.N] = self.N - 1
# Avoid converting large positive values to negative integers.
np.clip(xa, -1, self.N, out=xa)
with np.errstate(invalid="ignore"):
# We need this cast for unsigned ints as well as floats
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
xa[xa > self.N - 1] = self._i_over
xa[xa < 0] = self._i_under
xa[mask_bad] = self._i_bad
lut = self._lut
if bytes:
lut = (lut * 255).astype(np.uint8)
rgba = lut.take(xa, axis=0, mode='clip')
if alpha is not None:
alpha = np.clip(alpha, 0, 1)
if bytes:
alpha *= 255 # Will be cast to uint8 upon assignment.
if alpha.shape not in [(), xa.shape]:
raise ValueError(
f"alpha is array-like but its shape {alpha.shape} does "
f"not match that of X {xa.shape}")
rgba[..., -1] = alpha
# If the "bad" color is all zeros, then ignore alpha input.
if (lut[-1] == 0).all() and np.any(mask_bad):
if np.iterable(mask_bad) and mask_bad.shape == xa.shape:
rgba[mask_bad] = (0, 0, 0, 0)
else:
rgba[..., :] = (0, 0, 0, 0)
if not np.iterable(X):
rgba = tuple(rgba)
return rgba
def __copy__(self):
cls = self.__class__
cmapobject = cls.__new__(cls)
cmapobject.__dict__.update(self.__dict__)
if self._isinit:
cmapobject._lut = np.copy(self._lut)
return cmapobject
def __eq__(self, other):
if (not isinstance(other, Colormap) or self.name != other.name or
self.colorbar_extend != other.colorbar_extend):
return False
# To compare lookup tables the Colormaps have to be initialized
if not self._isinit:
self._init()
if not other._isinit:
other._init()
return np.array_equal(self._lut, other._lut)
def get_bad(self):
"""Get the color for masked values."""
if not self._isinit:
self._init()
return np.array(self._lut[self._i_bad])
def set_bad(self, color='k', alpha=None):
"""Set the color for masked values."""
self._rgba_bad = to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def get_under(self):
"""Get the color for low out-of-range values."""
if not self._isinit:
self._init()
return np.array(self._lut[self._i_under])
def set_under(self, color='k', alpha=None):
"""Set the color for low out-of-range values."""
self._rgba_under = to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def get_over(self):
"""Get the color for high out-of-range values."""
if not self._isinit:
self._init()
return np.array(self._lut[self._i_over])
def set_over(self, color='k', alpha=None):
"""Set the color for high out-of-range values."""
self._rgba_over = to_rgba(color, alpha)
if self._isinit:
self._set_extremes()
def set_extremes(self, *, bad=None, under=None, over=None):
"""
Set the colors for masked (*bad*) values and, when ``norm.clip =
False``, low (*under*) and high (*over*) out-of-range values.
"""
if bad is not None:
self.set_bad(bad)
if under is not None:
self.set_under(under)
if over is not None:
self.set_over(over)
def with_extremes(self, *, bad=None, under=None, over=None):
"""
Return a copy of the colormap, for which the colors for masked (*bad*)
values and, when ``norm.clip = False``, low (*under*) and high (*over*)
out-of-range values, have been set accordingly.
"""
new_cm = self.copy()
new_cm.set_extremes(bad=bad, under=under, over=over)
return new_cm
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def _init(self):
"""Generate the lookup table, ``self._lut``."""
raise NotImplementedError("Abstract class only")
def is_gray(self):
"""Return whether the colormap is grayscale."""
if not self._isinit:
self._init()
return (np.all(self._lut[:, 0] == self._lut[:, 1]) and
np.all(self._lut[:, 0] == self._lut[:, 2]))
def resampled(self, lutsize):
"""Return a new colormap with *lutsize* entries."""
if hasattr(self, '_resample'):
_api.warn_external(
"The ability to resample a color map is now public API "
f"However the class {type(self)} still only implements "
"the previous private _resample method. Please update "
"your class."
)
return self._resample(lutsize)
raise NotImplementedError()
def reversed(self, name=None):
"""
Return a reversed instance of the Colormap.
.. note:: This function is not implemented for the base class.
Parameters
----------
name : str, optional
The name for the reversed colormap. If None, the
name is set to ``self.name + "_r"``.
See Also
--------
LinearSegmentedColormap.reversed
ListedColormap.reversed
"""
raise NotImplementedError()
def _repr_png_(self):
"""Generate a PNG representation of the Colormap."""
X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]),
(_REPR_PNG_SIZE[1], 1))
pixels = self(X, bytes=True)
png_bytes = io.BytesIO()
title = self.name + ' colormap'
author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
pnginfo = PngInfo()
pnginfo.add_text('Title', title)
pnginfo.add_text('Description', title)
pnginfo.add_text('Author', author)
pnginfo.add_text('Software', author)
Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
return png_bytes.getvalue()
def _repr_html_(self):
    """Generate an HTML representation of the Colormap."""
    # Render the gradient strip as PNG and inline it as a base64 data URI
    # so the HTML is fully self-contained (no external files).
    png_bytes = self._repr_png_()
    png_base64 = base64.b64encode(png_bytes).decode('ascii')
    def color_block(color):
        # Small inline square swatch for a single RGBA color, with the hex
        # value shown as a tooltip.
        hex_color = to_hex(color, keep_alpha=True)
        return (f'<div title="{hex_color}" '
                'style="display: inline-block; '
                'width: 1em; height: 1em; '
                'margin: 0; '
                'vertical-align: middle; '
                'border: 1px solid #555; '
                f'background-color: {hex_color};"></div>')
    # Layout: colormap name, the gradient image, then a row showing the
    # "under", "bad" and "over" swatches aligned left/center/right.
    return ('<div style="vertical-align: middle;">'
            f'<strong>{self.name}</strong> '
            '</div>'
            '<div class="cmap"><img '
            f'alt="{self.name} colormap" '
            f'title="{self.name}" '
            'style="border: 1px solid #555;" '
            f'src="data:image/png;base64,{png_base64}"></div>'
            '<div style="vertical-align: middle; '
            f'max-width: {_REPR_PNG_SIZE[0]+2}px; '
            'display: flex; justify-content: space-between;">'
            '<div style="float: left;">'
            f'{color_block(self.get_under())} under'
            '</div>'
            '<div style="margin: 0 auto; display: inline-block;">'
            f'bad {color_block(self.get_bad())}'
            '</div>'
            '<div style="float: right;">'
            f'over {color_block(self.get_over())}'
            '</div>')
def copy(self):
    """Return a copy of this colormap (delegates to ``__copy__``)."""
    return self.__copy__()
def _validate_cmap(s):
    # Accept either a colormap name (str) or a Colormap instance; anything
    # else makes _api.check_isinstance raise.  The value is passed through
    # unchanged.
    _api.check_isinstance((str, Colormap), cmap=s)
    return s
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_aspect(s):
    """Validate an axes aspect: ``'auto'``, ``'equal'``, or a float."""
    if s == 'auto' or s == 'equal':
        return s
    try:
        return float(s)
    except ValueError as exc:
        raise ValueError('not a valid aspect specification') from exc
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_fontsize(s):
    """Validate a font size: a named size (case-insensitive) or a float."""
    named_sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
                   'x-large', 'xx-large', 'smaller', 'larger']
    if isinstance(s, str):
        s = s.lower()
    if s in named_sizes:
        return s
    try:
        return float(s)
    except ValueError as exc:
        raise ValueError("%s is not a valid font size. Valid font sizes "
                         "are %s." % (s, ", ".join(named_sizes))) from exc
def validate_fontsize_None(s):
    """Like `validate_fontsize`, but additionally map None/'None' to None."""
    if s is None or s == 'None':
        return None
    return validate_fontsize(s)
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_fontweight(s):
    """Validate a font weight: a known weight name or an integer."""
    _known_weights = (
        'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman',
        'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black')
    # Note: Historically, weights have been case-sensitive in Matplotlib
    if s in _known_weights:
        return s
    try:
        return int(s)
    except (ValueError, TypeError) as exc:
        raise ValueError(f'{s} is not a valid font weight.') from exc
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def validate_fontstretch(s):
    """Validate a font stretch: a known stretch name or an integer."""
    _known_stretches = (
        'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed',
        'normal', 'semi-expanded', 'expanded', 'extra-expanded',
        'ultra-expanded')
    # Note: Historically, stretchvalues have been case-sensitive in Matplotlib
    if s in _known_stretches:
        return s
    try:
        return int(s)
    except (ValueError, TypeError) as exc:
        raise ValueError(f'{s} is not a valid font stretch.') from exc
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def parse_fontconfig_pattern(pattern):
    """
    Parse a fontconfig *pattern* into a dict that can initialize a
    `.font_manager.FontProperties` object.
    """
    parser = _make_fontconfig_parser()
    try:
        parse = parser.parseString(pattern)
    except ParseException as err:
        # explain becomes a plain method on pyparsing 3 (err.explain(0)).
        raise ValueError("\n" + ParseException.explain(err, 0)) from None
    parser.resetCache()
    props = {}
    # Families and sizes are unescaped/copied into list-valued entries.
    if "families" in parse:
        props["family"] = [*map(_family_unescape, parse["families"])]
    if "sizes" in parse:
        props["size"] = [*parse["sizes"]]
    for prop in parse.get("properties", []):
        if len(prop) == 1:
            # A bare constant (e.g. "bold"): expand via the _CONSTANTS
            # table; unknown constants are deprecated, not an error.
            if prop[0] not in _CONSTANTS:
                _api.warn_deprecated(
                    "3.7", message=f"Support for unknown constants "
                    f"({prop[0]!r}) is deprecated since %(since)s and "
                    f"will be removed %(removal)s.")
                continue
            prop = _CONSTANTS[prop[0]]
        k, *v = prop
        # A property may occur several times; accumulate values per key.
        props.setdefault(k, []).extend(map(_value_unescape, v))
    return props
def validate_font_properties(s):
    # Validate only: parsing raises ValueError on a malformed fontconfig
    # pattern; on success the original string is returned unchanged.
    parse_fontconfig_pattern(s)
    return s
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def _validate_mathtext_fallback(s):
_fallback_fonts = ['cm', 'stix', 'stixsans']
if isinstance(s, str):
s = s.lower()
if s is None or s == 'none':
return None
elif s.lower() in _fallback_fonts:
return s
else:
raise ValueError(
f"{s} is not a valid fallback font name. Valid fallback font "
f"names are {','.join(_fallback_fonts)}. Passing 'None' will turn "
"fallback off.") | null |
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
def _listify_validator(scalar_validator, allow_stringlist=False, *,
                       n=None, doc=None):
    """
    Wrap *scalar_validator* into a validator accepting a list of values.

    The returned function accepts either a comma-separated string or any
    ordered iterable, applies *scalar_validator* to each element, and
    returns the resulting list.  If *n* is given, exactly *n* elements are
    required; *doc* overrides the generated function's docstring.
    """
    def f(s):
        if isinstance(s, str):
            try:
                val = [scalar_validator(v.strip()) for v in s.split(',')
                       if v.strip()]
            except Exception:
                if allow_stringlist:
                    # Sometimes, a list of colors might be a single string
                    # of single-letter colornames. So give that a shot.
                    val = [scalar_validator(v.strip()) for v in s if v.strip()]
                else:
                    raise
        # Allow any ordered sequence type -- generators, np.ndarray, pd.Series
        # -- but not sets, whose iteration order is non-deterministic.
        elif np.iterable(s) and not isinstance(s, (set, frozenset)):
            # The condition on this list comprehension will preserve the
            # behavior of filtering out any empty strings (behavior was
            # from the original validate_stringlist()), while allowing
            # any non-string/text scalar values such as numbers and arrays.
            val = [scalar_validator(v) for v in s
                   if not isinstance(v, str) or v]
        else:
            raise ValueError(
                f"Expected str or other non-set iterable, but got {s}")
        if n is not None and len(val) != n:
            raise ValueError(
                f"Expected {n} values, but there are {len(val)} values in {s}")
        return val
    # Give the wrapper an informative name/qualname/doc for error messages
    # and introspection.
    try:
        f.__name__ = "{}list".format(scalar_validator.__name__)
    except AttributeError:  # class instance.
        f.__name__ = "{}List".format(type(scalar_validator).__name__)
    f.__qualname__ = f.__qualname__.rsplit(".", 1)[0] + "." + f.__name__
    f.__doc__ = doc if doc is not None else scalar_validator.__doc__
    return f
# Scalar validator accepting anything ``float()`` can convert.
validate_float = _make_type_validator(float)
def validate_whiskers(s):
    """
    Validate the boxplot whiskers setting: a single float or a pair of
    floats.
    """
    pair_validator = _listify_validator(validate_float, n=2)
    try:
        return pair_validator(s)
    except (TypeError, ValueError):
        # Not a valid pair -- fall back to a single scalar.
        try:
            return float(s)
        except ValueError as exc:
            raise ValueError("Not a valid whisker value [float, "
                             "(float, float)]") from exc
import ast
from functools import lru_cache, reduce
from numbers import Number
import operator
import os
import re
import numpy as np
from matplotlib import _api, cbook
from matplotlib.cbook import ls_mapper
from matplotlib.colors import Colormap, is_color_like
from matplotlib._fontconfig_pattern import parse_fontconfig_pattern
from matplotlib._enums import JoinStyle, CapStyle
from cycler import Cycler, cycler as ccycler
class ValidateInStrings:
    """Callable validator restricting a value to a fixed set of strings."""

    def __init__(self, key, valid, ignorecase=False, *,
                 _deprecated_since=None):
        """*valid* is a list of legal strings."""
        self.key = key
        self.ignorecase = ignorecase
        self._deprecated_since = _deprecated_since
        # Map the (optionally lower-cased) lookup form of each legal string
        # back to its canonical spelling.
        if ignorecase:
            self.valid = {k.lower(): k for k in valid}
        else:
            self.valid = {k: k for k in valid}

    def __call__(self, s):
        if self._deprecated_since:
            name, = (k for k, v in globals().items() if v is self)
            _api.warn_deprecated(
                self._deprecated_since, name=name, obj_type="function")
        if self.ignorecase and isinstance(s, str):
            s = s.lower()
        if s in self.valid:
            return self.valid[s]
        msg = (f"{s!r} is not a valid value for {self.key}; supported values "
               f"are {[*self.valid.values()]}")
        # A common mistake: the value is valid but wrapped in literal quotes.
        quoted = (isinstance(s, str)
                  and (s.startswith('"') and s.endswith('"')
                       or s.startswith("'") and s.endswith("'")))
        if quoted and s[1:-1] in self.valid:
            msg += "; remove quotes surrounding your string"
        raise ValueError(msg)
def validate_ps_distiller(s):
    """
    Validate ps.usedistiller: None-ish values disable distillation,
    otherwise one of 'ghostscript' / 'xpdf'.
    """
    if isinstance(s, str):
        s = s.lower()
    if s in ('none', None, 'false', False):
        return None
    return ValidateInStrings('ps.usedistiller', ['ghostscript', 'xpdf'])(s)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.