"
+ else:
+ s = "<" + str(t.type) + ">"
+ s = s.replace("\n","\\n")
+ s = s.replace("\r","\\r")
+ s = s.replace("\t","\\t")
+ return "'" + s + "'"
+
+ def getErrorListenerDispatch(self):
+ return ProxyErrorListener(self._listeners)
+
+ # subclass needs to override these if there are sempreds or actions
+ # that the ATN interp needs to execute
+ def sempred(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
+ return True
+
+ def precpred(self, localctx:RuleContext , precedence:int):
+ return True
+
+ @property
+ def state(self):
+ return self._stateNumber
+
+ # Indicate that the recognizer has changed internal state that is
+ # consistent with the ATN state passed in. This way we always know
+ # where we are in the ATN as the parser goes along. The rule
+ # context objects form a stack that lets us see the stack of
+ # invoking rules. Combine this and we have complete ATN
+ # configuration information.
+
+ @state.setter
+ def state(self, atnState:int):
+ self._stateNumber = atnState
+
+del RecognitionException
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/RuleContext.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/RuleContext.py
new file mode 100644
index 0000000000000000000000000000000000000000..7812ba3b1c87cc53dd5a7e64cdd0c556f6a43822
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/RuleContext.py
@@ -0,0 +1,227 @@
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#/
+
+
+# A rule context is a record of a single rule invocation. It knows
+# which context invoked it, if any. If there is no parent context, then
+# naturally the invoking state is not valid. The parent link
+# provides a chain upwards from the current rule invocation to the root
+# of the invocation tree, forming a stack. We actually carry no
+# information about the rule associated with this context (except
+# when parsing). We keep only the state number of the invoking state from
+# the ATN submachine that invoked this. Contrast this with the s
+# pointer inside ParserRuleContext that tracks the current state
+# being "executed" for the current rule.
+#
+# The parent contexts are useful for computing lookahead sets and
+# getting error information.
+#
+# These objects are used during parsing and prediction.
+# For the special case of parsers, we use the subclass
+# ParserRuleContext.
+#
+# @see ParserRuleContext
+#/
+from io import StringIO
+from antlr4.tree.Tree import RuleNode, INVALID_INTERVAL, ParseTreeVisitor
+from antlr4.tree.Trees import Trees
+
+# need forward declarations
+RuleContext = None
+Parser = None
+
+class RuleContext(RuleNode):
+ __slots__ = ('parentCtx', 'invokingState')
+ EMPTY = None
+
+ def __init__(self, parent:RuleContext=None, invokingState:int=-1):
+ super().__init__()
+ # What context invoked this rule?
+ self.parentCtx = parent
+ # What state invoked the rule associated with this context?
+ # The "return address" is the followState of invokingState
+ # If parent is null, this should be -1.
+ self.invokingState = invokingState
+
+
+ def depth(self):
+ n = 0
+ p = self
+ while p is not None:
+ p = p.parentCtx
+ n += 1
+ return n
+
+ # A context is empty if there is no invoking state; meaning nobody call
+ # current context.
+ def isEmpty(self):
+ return self.invokingState == -1
+
+ # satisfy the ParseTree / SyntaxTree interface
+
+ def getSourceInterval(self):
+ return INVALID_INTERVAL
+
+ def getRuleContext(self):
+ return self
+
+ def getPayload(self):
+ return self
+
+ # Return the combined text of all child nodes. This method only considers
+ # tokens which have been added to the parse tree.
+ #
+ # Since tokens on hidden channels (e.g. whitespace or comments) are not
+ # added to the parse trees, they will not appear in the output of this
+ # method.
+ #/
+ def getText(self):
+ if self.getChildCount() == 0:
+ return ""
+ with StringIO() as builder:
+ for child in self.getChildren():
+ builder.write(child.getText())
+ return builder.getvalue()
+
+ def getRuleIndex(self):
+ return -1
+
+ # For rule associated with this parse tree internal node, return
+ # the outer alternative number used to match the input. Default
+ # implementation does not compute nor store this alt num. Create
+ # a subclass of ParserRuleContext with backing field and set
+ # option contextSuperClass.
+ # to set it.
+ def getAltNumber(self):
+ return 0 # should use ATN.INVALID_ALT_NUMBER but won't compile
+
+ # Set the outer alternative number for this context node. Default
+ # implementation does nothing to avoid backing field overhead for
+ # trees that don't need it. Create
+ # a subclass of ParserRuleContext with backing field and set
+ # option contextSuperClass.
+ def setAltNumber(self, altNumber:int):
+ pass
+
+ def getChild(self, i:int):
+ return None
+
+ def getChildCount(self):
+ return 0
+
+ def getChildren(self):
+ for c in []:
+ yield c
+
+ def accept(self, visitor:ParseTreeVisitor):
+ return visitor.visitChildren(self)
+
+ # # Call this method to view a parse tree in a dialog box visually.#/
+ # public Future<JDialog> inspect(@Nullable Parser parser) {
+ # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
+ # return inspect(ruleNames);
+ # }
+ #
+ # public Future<JDialog> inspect(@Nullable List<String> ruleNames) {
+ # TreeViewer viewer = new TreeViewer(ruleNames, this);
+ # return viewer.open();
+ # }
+ #
+ # # Save this tree in a postscript file#/
+ # public void save(@Nullable Parser parser, String fileName)
+ # throws IOException, PrintException
+ # {
+ # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
+ # save(ruleNames, fileName);
+ # }
+ #
+ # # Save this tree in a postscript file using a particular font name and size#/
+ # public void save(@Nullable Parser parser, String fileName,
+ # String fontName, int fontSize)
+ # throws IOException
+ # {
+ # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
+ # save(ruleNames, fileName, fontName, fontSize);
+ # }
+ #
+ # # Save this tree in a postscript file#/
+ # public void save(@Nullable List<String> ruleNames, String fileName)
+ # throws IOException, PrintException
+ # {
+ # Trees.writePS(this, ruleNames, fileName);
+ # }
+ #
+ # # Save this tree in a postscript file using a particular font name and size#/
+ # public void save(@Nullable List<String> ruleNames, String fileName,
+ # String fontName, int fontSize)
+ # throws IOException
+ # {
+ # Trees.writePS(this, ruleNames, fileName, fontName, fontSize);
+ # }
+ #
+ # # Print out a whole tree, not just a node, in LISP format
+ # # (root child1 .. childN). Print just a node if this is a leaf.
+ # # We have to know the recognizer so we can get rule names.
+ # #/
+ # @Override
+ # public String toStringTree(@Nullable Parser recog) {
+ # return Trees.toStringTree(this, recog);
+ # }
+ #
+ # Print out a whole tree, not just a node, in LISP format
+ # (root child1 .. childN). Print just a node if this is a leaf.
+ #
+ def toStringTree(self, ruleNames:list=None, recog:Parser=None):
+ return Trees.toStringTree(self, ruleNames=ruleNames, recog=recog)
+ # }
+ #
+ # @Override
+ # public String toStringTree() {
+ # return toStringTree((List<String>)null);
+ # }
+ #
+ def __str__(self):
+ return self.toString(None, None)
+
+ # @Override
+ # public String toString() {
+ # return toString((List<String>)null, (RuleContext)null);
+ # }
+ #
+ # public final String toString(@Nullable Recognizer<?,?> recog) {
+ # return toString(recog, ParserRuleContext.EMPTY);
+ # }
+ #
+ # public final String toString(@Nullable List<String> ruleNames) {
+ # return toString(ruleNames, null);
+ # }
+ #
+ # // recog null unless ParserRuleContext, in which case we use subclass toString(...)
+ # public String toString(@Nullable Recognizer<?,?> recog, @Nullable RuleContext stop) {
+ # String[] ruleNames = recog != null ? recog.getRuleNames() : null;
+ # List<String> ruleNamesList = ruleNames != null ? Arrays.asList(ruleNames) : null;
+ # return toString(ruleNamesList, stop);
+ # }
+
+ def toString(self, ruleNames:list, stop:RuleContext)->str:
+ with StringIO() as buf:
+ p = self
+ buf.write("[")
+ while p is not None and p is not stop:
+ if ruleNames is None:
+ if not p.isEmpty():
+ buf.write(str(p.invokingState))
+ else:
+ ri = p.getRuleIndex()
+ ruleName = ruleNames[ri] if ri >= 0 and ri < len(ruleNames) else str(ri)
+ buf.write(ruleName)
+
+ if p.parentCtx is not None and (ruleNames is not None or not p.parentCtx.isEmpty()):
+ buf.write(" ")
+
+ p = p.parentCtx
+
+ buf.write("]")
+ return buf.getvalue()
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/StdinStream.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/StdinStream.py
new file mode 100644
index 0000000000000000000000000000000000000000..f044fc4d770b4bc86cce2a5578f2c2fa00fc7602
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/StdinStream.py
@@ -0,0 +1,11 @@
+import codecs
+import sys
+
+from antlr4.InputStream import InputStream
+
+
+class StdinStream(InputStream):
+ def __init__(self, encoding:str='ascii', errors:str='strict') -> None:
+ bytes = sys.stdin.buffer.read()
+ data = codecs.decode(bytes, encoding, errors)
+ super().__init__(data)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/Token.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/Token.py
new file mode 100644
index 0000000000000000000000000000000000000000..10a68a8c2260e47838a1c9403def21725aecc244
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/Token.py
@@ -0,0 +1,155 @@
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#
+
+# A token has properties: text, type, line, character position in the line
+# (so we can ignore tabs), token channel, index, and source from which
+# we obtained this token.
+from io import StringIO
+
+
+class Token (object):
+ __slots__ = ('source', 'type', 'channel', 'start', 'stop', 'tokenIndex', 'line', 'column', '_text')
+
+ INVALID_TYPE = 0
+
+ # During lookahead operations, this "token" signifies we hit rule end ATN state
+ # and did not follow it despite needing to.
+ EPSILON = -2
+
+ MIN_USER_TOKEN_TYPE = 1
+
+ EOF = -1
+
+ # All tokens go to the parser (unless skip() is called in that rule)
+ # on a particular "channel". The parser tunes to a particular channel
+ # so that whitespace etc... can go to the parser on a "hidden" channel.
+
+ DEFAULT_CHANNEL = 0
+
+ # Anything on different channel than DEFAULT_CHANNEL is not parsed
+ # by parser.
+
+ HIDDEN_CHANNEL = 1
+
+ def __init__(self):
+ self.source = None
+ self.type = None # token type of the token
+ self.channel = None # The parser ignores everything not on DEFAULT_CHANNEL
+ self.start = None # optional; return -1 if not implemented.
+ self.stop = None # optional; return -1 if not implemented.
+ self.tokenIndex = None # from 0..n-1 of the token object in the input stream
+ self.line = None # line=1..n of the 1st character
+ self.column = None # beginning of the line at which it occurs, 0..n-1
+ self._text = None # text of the token.
+
+ @property
+ def text(self):
+ return self._text
+
+ # Explicitly set the text for this token. If {code text} is not
+ # {@code null}, then {@link #getText} will return this value rather than
+ # extracting the text from the input.
+ #
+ # @param text The explicit text of the token, or {@code null} if the text
+ # should be obtained from the input along with the start and stop indexes
+ # of the token.
+
+ @text.setter
+ def text(self, text:str):
+ self._text = text
+
+
+ def getTokenSource(self):
+ return self.source[0]
+
+ def getInputStream(self):
+ return self.source[1]
+
+class CommonToken(Token):
+
+ # An empty {@link Pair} which is used as the default value of
+ # {@link #source} for tokens that do not have a source.
+ EMPTY_SOURCE = (None, None)
+
+ def __init__(self, source:tuple = EMPTY_SOURCE, type:int = None, channel:int=Token.DEFAULT_CHANNEL, start:int=-1, stop:int=-1):
+ super().__init__()
+ self.source = source
+ self.type = type
+ self.channel = channel
+ self.start = start
+ self.stop = stop
+ self.tokenIndex = -1
+ if source[0] is not None:
+ self.line = source[0].line
+ self.column = source[0].column
+ else:
+ self.column = -1
+
+ # Constructs a new {@link CommonToken} as a copy of another {@link Token}.
+ #
+ #
+ # If {@code oldToken} is also a {@link CommonToken} instance, the newly
+ # constructed token will share a reference to the {@link #text} field and
+ # the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will
+ # be assigned the result of calling {@link #getText}, and {@link #source}
+ # will be constructed from the result of {@link Token#getTokenSource} and
+ # {@link Token#getInputStream}.
+ #
+ # @param oldToken The token to copy.
+ #
+ def clone(self):
+ t = CommonToken(self.source, self.type, self.channel, self.start, self.stop)
+ t.tokenIndex = self.tokenIndex
+ t.line = self.line
+ t.column = self.column
+ t.text = self.text
+ return t
+
+ @property
+ def text(self):
+ if self._text is not None:
+ return self._text
+ input = self.getInputStream()
+ if input is None:
+ return None
+ n = input.size
+ if self.start < n and self.stop < n:
+ return input.getText(self.start, self.stop)
+ else:
+ return ""
+
+ @text.setter
+ def text(self, text:str):
+ self._text = text
+
+ def __str__(self):
+ with StringIO() as buf:
+ buf.write("[@")
+ buf.write(str(self.tokenIndex))
+ buf.write(",")
+ buf.write(str(self.start))
+ buf.write(":")
+ buf.write(str(self.stop))
+ buf.write("='")
+ txt = self.text
+ if txt is not None:
+ txt = txt.replace("\n","\\n")
+ txt = txt.replace("\r","\\r")
+ txt = txt.replace("\t","\\t")
+ else:
+ txt = ""
+ buf.write(txt)
+ buf.write("',<")
+ buf.write(str(self.type))
+ buf.write(">")
+ if self.channel > 0:
+ buf.write(",channel=")
+ buf.write(str(self.channel))
+ buf.write(",")
+ buf.write(str(self.line))
+ buf.write(":")
+ buf.write(str(self.column))
+ buf.write("]")
+ return buf.getvalue()
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/TokenStreamRewriter.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/TokenStreamRewriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..59baf8f47a1ba447a143842c8df825ef58035918
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/TokenStreamRewriter.py
@@ -0,0 +1,255 @@
+#
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#
+
+from io import StringIO
+from antlr4.Token import Token
+
+from antlr4.CommonTokenStream import CommonTokenStream
+
+
+class TokenStreamRewriter(object):
+ __slots__ = ('tokens', 'programs', 'lastRewriteTokenIndexes')
+
+ DEFAULT_PROGRAM_NAME = "default"
+ PROGRAM_INIT_SIZE = 100
+ MIN_TOKEN_INDEX = 0
+
+ def __init__(self, tokens):
+ """
+ :type tokens: antlr4.BufferedTokenStream.BufferedTokenStream
+ :param tokens:
+ :return:
+ """
+ super(TokenStreamRewriter, self).__init__()
+ self.tokens = tokens
+ self.programs = {self.DEFAULT_PROGRAM_NAME: []}
+ self.lastRewriteTokenIndexes = {}
+
+ def getTokenStream(self):
+ return self.tokens
+
+ def rollback(self, instruction_index, program_name):
+ ins = self.programs.get(program_name, None)
+ if ins:
+ self.programs[program_name] = ins[self.MIN_TOKEN_INDEX: instruction_index]
+
+ def deleteProgram(self, program_name=DEFAULT_PROGRAM_NAME):
+ self.rollback(self.MIN_TOKEN_INDEX, program_name)
+
+ def insertAfterToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
+ self.insertAfter(token.tokenIndex, text, program_name)
+
+ def insertAfter(self, index, text, program_name=DEFAULT_PROGRAM_NAME):
+ op = self.InsertAfterOp(self.tokens, index + 1, text)
+ rewrites = self.getProgram(program_name)
+ op.instructionIndex = len(rewrites)
+ rewrites.append(op)
+
+ def insertBeforeIndex(self, index, text):
+ self.insertBefore(self.DEFAULT_PROGRAM_NAME, index, text)
+
+ def insertBeforeToken(self, token, text, program_name=DEFAULT_PROGRAM_NAME):
+ self.insertBefore(program_name, token.tokenIndex, text)
+
+ def insertBefore(self, program_name, index, text):
+ op = self.InsertBeforeOp(self.tokens, index, text)
+ rewrites = self.getProgram(program_name)
+ op.instructionIndex = len(rewrites)
+ rewrites.append(op)
+
+ def replaceIndex(self, index, text):
+ self.replace(self.DEFAULT_PROGRAM_NAME, index, index, text)
+
+ def replaceRange(self, from_idx, to_idx, text):
+ self.replace(self.DEFAULT_PROGRAM_NAME, from_idx, to_idx, text)
+
+ def replaceSingleToken(self, token, text):
+ self.replace(self.DEFAULT_PROGRAM_NAME, token.tokenIndex, token.tokenIndex, text)
+
+ def replaceRangeTokens(self, from_token, to_token, text, program_name=DEFAULT_PROGRAM_NAME):
+ self.replace(program_name, from_token.tokenIndex, to_token.tokenIndex, text)
+
+ def replace(self, program_name, from_idx, to_idx, text):
+ if any((from_idx > to_idx, from_idx < 0, to_idx < 0, to_idx >= len(self.tokens.tokens))):
+ raise ValueError(
+ 'replace: range invalid: {}..{}(size={})'.format(from_idx, to_idx, len(self.tokens.tokens)))
+ op = self.ReplaceOp(from_idx, to_idx, self.tokens, text)
+ rewrites = self.getProgram(program_name)
+ op.instructionIndex = len(rewrites)
+ rewrites.append(op)
+
+ def deleteToken(self, token):
+ self.delete(self.DEFAULT_PROGRAM_NAME, token, token)
+
+ def deleteIndex(self, index):
+ self.delete(self.DEFAULT_PROGRAM_NAME, index, index)
+
+ def delete(self, program_name, from_idx, to_idx):
+ if isinstance(from_idx, Token):
+ self.replace(program_name, from_idx.tokenIndex, to_idx.tokenIndex, "")
+ else:
+ self.replace(program_name, from_idx, to_idx, "")
+
+ def lastRewriteTokenIndex(self, program_name=DEFAULT_PROGRAM_NAME):
+ return self.lastRewriteTokenIndexes.get(program_name, -1)
+
+ def setLastRewriteTokenIndex(self, program_name, i):
+ self.lastRewriteTokenIndexes[program_name] = i
+
+ def getProgram(self, program_name):
+ return self.programs.setdefault(program_name, [])
+
+ def getDefaultText(self):
+ return self.getText(self.DEFAULT_PROGRAM_NAME, 0, len(self.tokens.tokens) - 1)
+
+ def getText(self, program_name, start:int, stop:int):
+ """
+ :return: the text in tokens[start, stop](closed interval)
+ """
+ rewrites = self.programs.get(program_name)
+
+ # ensure start/end are in range
+ if stop > len(self.tokens.tokens) - 1:
+ stop = len(self.tokens.tokens) - 1
+ if start < 0:
+ start = 0
+
+ # if no instructions to execute
+ if not rewrites: return self.tokens.getText(start, stop)
+ buf = StringIO()
+ indexToOp = self._reduceToSingleOperationPerIndex(rewrites)
+ i = start
+ while all((i <= stop, i < len(self.tokens.tokens))):
+ op = indexToOp.pop(i, None)
+ token = self.tokens.get(i)
+ if op is None:
+ if token.type != Token.EOF: buf.write(token.text)
+ i += 1
+ else:
+ i = op.execute(buf)
+
+ if stop == len(self.tokens.tokens)-1:
+ for op in indexToOp.values():
+ if op.index >= len(self.tokens.tokens)-1: buf.write(op.text)
+
+ return buf.getvalue()
+
+ def _reduceToSingleOperationPerIndex(self, rewrites):
+ # Walk replaces
+ for i, rop in enumerate(rewrites):
+ if any((rop is None, not isinstance(rop, TokenStreamRewriter.ReplaceOp))):
+ continue
+ # Wipe prior inserts within range
+ inserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
+ for iop in inserts:
+ if iop.index == rop.index:
+ rewrites[iop.instructionIndex] = None
+ rop.text = '{}{}'.format(iop.text, rop.text)
+ elif all((iop.index > rop.index, iop.index <= rop.last_index)):
+ rewrites[iop.instructionIndex] = None
+
+ # Drop any prior replaces contained within
+ prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
+ for prevRop in prevReplaces:
+ if all((prevRop.index >= rop.index, prevRop.last_index <= rop.last_index)):
+ rewrites[prevRop.instructionIndex] = None
+ continue
+ isDisjoint = any((prevRop.last_index<rop.index, prevRop.index>rop.last_index))
+ if all((prevRop.text is None, rop.text is None, not isDisjoint)):
+ rewrites[prevRop.instructionIndex] = None
+ rop.index = min(prevRop.index, rop.index)
+ rop.last_index = min(prevRop.last_index, rop.last_index)
+ print('New rop {}'.format(rop))
+ elif (not(isDisjoint)):
+ raise ValueError("replace op boundaries of {} overlap with previous {}".format(rop, prevRop))
+
+ # Walk inserts
+ for i, iop in enumerate(rewrites):
+ if any((iop is None, not isinstance(iop, TokenStreamRewriter.InsertBeforeOp))):
+ continue
+ prevInserts = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.InsertBeforeOp)]
+ for prev_index, prevIop in enumerate(prevInserts):
+ if prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertBeforeOp:
+ iop.text += prevIop.text
+ rewrites[prev_index] = None
+ elif prevIop.index == iop.index and type(prevIop) is TokenStreamRewriter.InsertAfterOp:
+ iop.text = prevIop.text + iop.text
+ rewrites[prev_index] = None
+ # look for replaces where iop.index is in range; error
+ prevReplaces = [op for op in rewrites[:i] if isinstance(op, TokenStreamRewriter.ReplaceOp)]
+ for rop in prevReplaces:
+ if iop.index == rop.index:
+ rop.text = iop.text + rop.text
+ rewrites[i] = None
+ continue
+ if all((iop.index >= rop.index, iop.index <= rop.last_index)):
+ raise ValueError("insert op {} within boundaries of previous {}".format(iop, rop))
+
+ reduced = {}
+ for i, op in enumerate(rewrites):
+ if op is None: continue
+ if reduced.get(op.index): raise ValueError('should be only one op per index')
+ reduced[op.index] = op
+
+ return reduced
+
+ class RewriteOperation(object):
+ __slots__ = ('tokens', 'index', 'text', 'instructionIndex')
+
+ def __init__(self, tokens, index, text=""):
+ """
+ :type tokens: CommonTokenStream
+ :param tokens:
+ :param index:
+ :param text:
+ :return:
+ """
+ self.tokens = tokens
+ self.index = index
+ self.text = text
+ self.instructionIndex = 0
+
+ def execute(self, buf):
+ """
+ :type buf: StringIO.StringIO
+ :param buf:
+ :return:
+ """
+ return self.index
+
+ def __str__(self):
+ return '<{}@{}:"{}">'.format(self.__class__.__name__, self.tokens.get(self.index), self.text)
+
+ class InsertBeforeOp(RewriteOperation):
+
+ def __init__(self, tokens, index, text=""):
+ super(TokenStreamRewriter.InsertBeforeOp, self).__init__(tokens, index, text)
+
+ def execute(self, buf):
+ buf.write(self.text)
+ if self.tokens.get(self.index).type != Token.EOF:
+ buf.write(self.tokens.get(self.index).text)
+ return self.index + 1
+
+ class InsertAfterOp(InsertBeforeOp):
+ pass
+
+ class ReplaceOp(RewriteOperation):
+ __slots__ = 'last_index'
+
+ def __init__(self, from_idx, to_idx, tokens, text):
+ super(TokenStreamRewriter.ReplaceOp, self).__init__(tokens, from_idx, text)
+ self.last_index = to_idx
+
+ def execute(self, buf):
+ if self.text:
+ buf.write(self.text)
+ return self.last_index + 1
+
+ def __str__(self):
+ if self.text:
+ return '<ReplaceOp@{}..{}:"{}">'.format(self.tokens.get(self.index), self.tokens.get(self.last_index),
+ self.text)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/Utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/Utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..88c870dae63ef4bda74e25aea47befb75be34a5d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/Utils.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#
+
+from io import StringIO
+
+def str_list(val):
+ with StringIO() as buf:
+ buf.write('[')
+ first = True
+ for item in val:
+ if not first:
+ buf.write(', ')
+ buf.write(str(item))
+ first = False
+ buf.write(']')
+ return buf.getvalue()
+
+def escapeWhitespace(s:str, escapeSpaces:bool):
+ with StringIO() as buf:
+ for c in s:
+ if c==' ' and escapeSpaces:
+ buf.write('\u00B7')
+ elif c=='\t':
+ buf.write("\\t")
+ elif c=='\n':
+ buf.write("\\n")
+ elif c=='\r':
+ buf.write("\\r")
+ else:
+ buf.write(c)
+ return buf.getvalue()
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..42027289e7af293a297b538f07f43ca4a566ef62
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__init__.py
@@ -0,0 +1,21 @@
+from antlr4.Token import Token
+from antlr4.InputStream import InputStream
+from antlr4.FileStream import FileStream
+from antlr4.StdinStream import StdinStream
+from antlr4.BufferedTokenStream import TokenStream
+from antlr4.CommonTokenStream import CommonTokenStream
+from antlr4.Lexer import Lexer
+from antlr4.Parser import Parser
+from antlr4.dfa.DFA import DFA
+from antlr4.atn.ATN import ATN
+from antlr4.atn.ATNDeserializer import ATNDeserializer
+from antlr4.atn.LexerATNSimulator import LexerATNSimulator
+from antlr4.atn.ParserATNSimulator import ParserATNSimulator
+from antlr4.atn.PredictionMode import PredictionMode
+from antlr4.PredictionContext import PredictionContextCache
+from antlr4.ParserRuleContext import RuleContext, ParserRuleContext
+from antlr4.tree.Tree import ParseTreeListener, ParseTreeVisitor, ParseTreeWalker, TerminalNode, ErrorNode, RuleNode
+from antlr4.error.Errors import RecognitionException, IllegalStateException, NoViableAltException
+from antlr4.error.ErrorStrategy import BailErrorStrategy
+from antlr4.error.DiagnosticErrorListener import DiagnosticErrorListener
+from antlr4.Utils import str_list
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..28ca56277f0c598235b91158fc8f99755997fb0e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2008 Andrew Collette and contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the
+ distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..6c27cb065a0bd8ec277fc7f03edcad6cab02f53b
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/METADATA
@@ -0,0 +1,44 @@
+Metadata-Version: 2.1
+Name: h5py
+Version: 3.7.0
+Summary: Read and write HDF5 files from Python
+Home-page: http://www.h5py.org
+Download-URL: https://pypi.python.org/pypi/h5py
+Author: Andrew Collette
+Author-email: andrew.collette@gmail.com
+Maintainer: Andrew Collette
+Maintainer-email: andrew.collette@gmail.com
+License: BSD
+Project-URL: Source, https://github.com/h5py/h5py
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Database
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Operating System :: Unix
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: numpy (>=1.14.5)
+
+
+The h5py package provides both a high- and low-level interface to the HDF5
+library from Python. The low-level interface is intended to be a complete
+wrapping of the HDF5 API, while the high-level component supports access to
+HDF5 files, datasets and groups using established Python and NumPy concepts.
+
+A strong emphasis on automatic conversion between Python (Numpy) datatypes and
+data structures and their HDF5 equivalents vastly simplifies the process of
+reading and writing data from Python.
+
+Supports HDF5 versions 1.8.4 and higher. On Windows, HDF5 is included with
+the installer.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..89ae159958955eb2b81a839ca2f2c11136c29846
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/RECORD
@@ -0,0 +1,148 @@
+h5py-3.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+h5py-3.7.0.dist-info/LICENSE,sha256=5XSylmVc-wylGA7G7uPJXRIrCQZzbTyn484Z4PKX8Mk,1520
+h5py-3.7.0.dist-info/METADATA,sha256=JamkAuiXXwlpN92PSUxGUL1Oa98Xpu2n3EkeSLxgzrQ,1837
+h5py-3.7.0.dist-info/RECORD,,
+h5py-3.7.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+h5py-3.7.0.dist-info/WHEEL,sha256=VC8YzyxoGiXrzl8xKL9ftgvyZU-Uz6pGPrSATb2mDHw,148
+h5py-3.7.0.dist-info/top_level.txt,sha256=fO7Bsaa0F3Nx6djErCCbSw4-E7rBFMWrBVTGLEMxUMg,5
+h5py.libs/libaec-9c9e97eb.so.0.0.10,sha256=00fJrQvyEz3nmFC9A-CrEYWhvDycTXQ1IHs9FGiGtU0,127816
+h5py.libs/libhdf5-fc7245dc.so.200.2.0,sha256=rIcGfgQl3P4XSe2gYlr9-zyvPdSLvJjo1p7PG_4Z7MA,4937968
+h5py.libs/libhdf5_hl-f1927c41.so.200.1.0,sha256=c1-cosBoYxKdtJLn3BpeZuakhvBkPFI8azR1YCdlo-g,177832
+h5py.libs/libsz-090daab4.so.2.0.1,sha256=-pYPuh-RJVXMtGwuNDzGr-GacC-te7n49Wig8TL-H7o,33128
+h5py/__init__.py,sha256=xDFUGba8pzZLl6QlF34QdZfZmg5kcRRYjYSKNKycMm8,3740
+h5py/__pycache__/__init__.cpython-38.pyc,,
+h5py/__pycache__/h5py_warnings.cpython-38.pyc,,
+h5py/__pycache__/ipy_completer.cpython-38.pyc,,
+h5py/__pycache__/version.cpython-38.pyc,,
+h5py/_conv.cpython-38-x86_64-linux-gnu.so,sha256=wJ4m94mCgjBWiI__Ck3h7uRZLH4Ndhz0JhS1VMtmKVg,438392
+h5py/_errors.cpython-38-x86_64-linux-gnu.so,sha256=QDlUhPFKWG6ywVS2Wsb3glLrKwbL5kwvKaZ-r4zj2HM,125984
+h5py/_hl/__init__.py,sha256=R6siZWBApRmGrOkxi8kNKxZ0Hcp2MieM-0EBWwbp7vc,457
+h5py/_hl/__pycache__/__init__.cpython-38.pyc,,
+h5py/_hl/__pycache__/attrs.cpython-38.pyc,,
+h5py/_hl/__pycache__/base.cpython-38.pyc,,
+h5py/_hl/__pycache__/compat.cpython-38.pyc,,
+h5py/_hl/__pycache__/dataset.cpython-38.pyc,,
+h5py/_hl/__pycache__/datatype.cpython-38.pyc,,
+h5py/_hl/__pycache__/dims.cpython-38.pyc,,
+h5py/_hl/__pycache__/files.cpython-38.pyc,,
+h5py/_hl/__pycache__/filters.cpython-38.pyc,,
+h5py/_hl/__pycache__/group.cpython-38.pyc,,
+h5py/_hl/__pycache__/selections.cpython-38.pyc,,
+h5py/_hl/__pycache__/selections2.cpython-38.pyc,,
+h5py/_hl/__pycache__/vds.cpython-38.pyc,,
+h5py/_hl/attrs.py,sha256=FdFYkMCrfkfd_CtRMEC4PA-674nG6ker7KoP0W1sM2g,10666
+h5py/_hl/base.py,sha256=y-V-YnrDlJEBitBNhxusJmRg2JwPlmHg4wupkCVv5BI,15669
+h5py/_hl/compat.py,sha256=fjqDXyrhyrtLTS2sIUm71B73NDjSNge2bvEpxTlHPZc,1375
+h5py/_hl/dataset.py,sha256=xq7ECdnQUOBFi6kwbeQcMSHc6XrZjzR8ReXmDrLx7Q8,40933
+h5py/_hl/datatype.py,sha256=-kIh5Ha9K21VLqInUmZ6SZdXag1yvyo30MpgsM3H0P0,1548
+h5py/_hl/dims.py,sha256=QaP2nbBVYcnTg9mtKtQMommpMYrsgbPXt4ZbBCveD7I,5422
+h5py/_hl/files.py,sha256=IG0ZSjBppYeDFJl3bYYqeV4H86Ctz36pstoEGjWZYQ4,22505
+h5py/_hl/filters.py,sha256=vVtjQ22OWpMGle6uXSk_TbwvmdrIl6Az7tE8v0fGs6k,13857
+h5py/_hl/group.py,sha256=2UtPwDZc18rUrt4KFzQzVRW9nZKkAoigtbwPdXzXFbs,25709
+h5py/_hl/selections.py,sha256=APiHZTZPoKR8cSeacrbVsbAJBZiv2mZzMw6FzUt0Tvk,14478
+h5py/_hl/selections2.py,sha256=PcKxIbSCYjL_QrbeCHw8-MHWDBxOlEDG_UHvP4aBuKY,2723
+h5py/_hl/vds.py,sha256=1aIso-p-pVZVX1OVoe0uDose_hJB8K-prnvrl75Bz3g,9353
+h5py/_objects.cpython-38-x86_64-linux-gnu.so,sha256=iaPaJd8BQGXAWkWbT6u7WkXxgE4jcixcpbS06CqBY5s,302720
+h5py/_proxy.cpython-38-x86_64-linux-gnu.so,sha256=Z_d6WTYTiSFmQ9VMb5vzjoO0WDz0XR-WWZrxluItuIE,113016
+h5py/_selector.cpython-38-x86_64-linux-gnu.so,sha256=7iGibwtvpe5WZMbypYc2vIuLFMW2vHLdn346oLdGlRM,360256
+h5py/defs.cpython-38-x86_64-linux-gnu.so,sha256=XkqZgpqdTiXgEwvmiB34gb43Y1MR7TQn4S838-ZyyAg,618600
+h5py/h5.cpython-38-x86_64-linux-gnu.so,sha256=m9iyydX5yBK94ngMY604yYbs-TnQ39K3ARAvDQulgp4,265208
+h5py/h5a.cpython-38-x86_64-linux-gnu.so,sha256=LvPlDqJkaFo4cQ6s7m6EUPlekBrr14-wcN2fFQfbyb4,269232
+h5py/h5ac.cpython-38-x86_64-linux-gnu.so,sha256=42llqjBDWySsGqnsITSk43IUbfNvwCaiOkkmTA71rMw,149784
+h5py/h5d.cpython-38-x86_64-linux-gnu.so,sha256=N4PnCMXHRtetyNQJH3RJe-WJDXAI62PsdPFF8AhzpXA,281560
+h5py/h5ds.cpython-38-x86_64-linux-gnu.so,sha256=J-Mj32APjFm4EoRhb6XjAmFPc14uKIDt4bZPZfX4TMo,183056
+h5py/h5f.cpython-38-x86_64-linux-gnu.so,sha256=kmoQKWS94D4X8ICc1CaItXnp-Pr5BlpiXZwaEGA5aV0,289768
+h5py/h5fd.cpython-38-x86_64-linux-gnu.so,sha256=5XbCKS-aVbZMOkhGuoLXftCQl_ZDQ5oRhE8k16X3mZQ,401184
+h5py/h5g.cpython-38-x86_64-linux-gnu.so,sha256=2hyutyIK0B14Lcp5qPNq7hPU8ZKjsc3C9eg5kHnc8Ck,363864
+h5py/h5i.cpython-38-x86_64-linux-gnu.so,sha256=LQ_Zw42xTwRQISgxWsaAczJkwP6xE2OsEpJUxXxZFfI,162200
+h5py/h5l.cpython-38-x86_64-linux-gnu.so,sha256=7WbXME0dDdrs6ouoFhAeJfvf2ZkEXpLnfccZuuYPiC8,240472
+h5py/h5o.cpython-38-x86_64-linux-gnu.so,sha256=TiU90cfXTQv3UuHJu31Px9KsswzhqZVNrU5t-D6Vork,244536
+h5py/h5p.cpython-38-x86_64-linux-gnu.so,sha256=x5VAJVAxrFFLEJ4YT_1B3aLcbEnBm-NdRH94A8PpsUw,781880
+h5py/h5pl.cpython-38-x86_64-linux-gnu.so,sha256=kdrA86c_8obMnu5FWFG6_9K9q-zF8CmWKkXHNQBdP04,88200
+h5py/h5py_warnings.py,sha256=bNPu7wWPt1xLVZVB-lpycIoMux1kcNfaKf4NE78dK90,523
+h5py/h5r.cpython-38-x86_64-linux-gnu.so,sha256=iMTtxaxs969Daf78ohPHC7VxORmIxK_NH78mjs1_m20,137880
+h5py/h5s.cpython-38-x86_64-linux-gnu.so,sha256=tDnJplllrO5RAsvdQp8g6232HZdagcppNMrYdrmusMc,264920
+h5py/h5t.cpython-38-x86_64-linux-gnu.so,sha256=6yIdOmYAxh3VOZF8Z0QiCqctvWayWiR5Xu6h8-Q6tKk,950896
+h5py/h5z.cpython-38-x86_64-linux-gnu.so,sha256=mbPZZ-9K41tX5nl-gHnUPHGgqhRvp25ZLysFSZkF1Lk,104872
+h5py/ipy_completer.py,sha256=QiR-SjQYMs20eXVITR93tJWWSL3DBZBTujq1sFa-wHc,3642
+h5py/tests/__init__.py,sha256=rPMGYVW4kZp6gC69p8VAZorACrpr0LoY08s5fhqQpeA,668
+h5py/tests/__pycache__/__init__.cpython-38.pyc,,
+h5py/tests/__pycache__/common.cpython-38.pyc,,
+h5py/tests/__pycache__/conftest.cpython-38.pyc,,
+h5py/tests/__pycache__/test_attribute_create.cpython-38.pyc,,
+h5py/tests/__pycache__/test_attrs.cpython-38.pyc,,
+h5py/tests/__pycache__/test_attrs_data.cpython-38.pyc,,
+h5py/tests/__pycache__/test_base.cpython-38.pyc,,
+h5py/tests/__pycache__/test_big_endian_file.cpython-38.pyc,,
+h5py/tests/__pycache__/test_completions.cpython-38.pyc,,
+h5py/tests/__pycache__/test_dataset.cpython-38.pyc,,
+h5py/tests/__pycache__/test_dataset_getitem.cpython-38.pyc,,
+h5py/tests/__pycache__/test_dataset_swmr.cpython-38.pyc,,
+h5py/tests/__pycache__/test_datatype.cpython-38.pyc,,
+h5py/tests/__pycache__/test_dimension_scales.cpython-38.pyc,,
+h5py/tests/__pycache__/test_dims_dimensionproxy.cpython-38.pyc,,
+h5py/tests/__pycache__/test_dtype.cpython-38.pyc,,
+h5py/tests/__pycache__/test_errors.cpython-38.pyc,,
+h5py/tests/__pycache__/test_file.cpython-38.pyc,,
+h5py/tests/__pycache__/test_file2.cpython-38.pyc,,
+h5py/tests/__pycache__/test_file_alignment.cpython-38.pyc,,
+h5py/tests/__pycache__/test_file_image.cpython-38.pyc,,
+h5py/tests/__pycache__/test_filters.cpython-38.pyc,,
+h5py/tests/__pycache__/test_group.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5d_direct_chunk.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5f.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5o.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5p.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5pl.cpython-38.pyc,,
+h5py/tests/__pycache__/test_h5t.cpython-38.pyc,,
+h5py/tests/__pycache__/test_objects.cpython-38.pyc,,
+h5py/tests/__pycache__/test_selections.cpython-38.pyc,,
+h5py/tests/__pycache__/test_slicing.cpython-38.pyc,,
+h5py/tests/common.py,sha256=unn8sY8fPtGAYuykfkG7rqSQsus0XypYw4rXkw-RDZE,6845
+h5py/tests/conftest.py,sha256=Oxf4T7cxJ4CRqH16k-y6VAFclkapEy1quVvbD7cDipY,143
+h5py/tests/data_files/__init__.py,sha256=WgwvU8ZVG00ZN2EOOnZ7TYJX_NzF_JCSd7nSEDJrbrE,193
+h5py/tests/data_files/__pycache__/__init__.cpython-38.pyc,,
+h5py/tests/data_files/vlen_string_dset.h5,sha256=kA-LrxnT2MTRGTrBrGAZ7nd0AF6FwDCvROZ1ezjSl5M,6304
+h5py/tests/data_files/vlen_string_dset_utc.h5,sha256=hbcoOCuDPB2mFie5ozTiKCLVwcs1n-O6byUmKvRTL2M,169904
+h5py/tests/data_files/vlen_string_s390x.h5,sha256=6pkMaOA3G6-rSWb3UvoTVbO59kNgBm9tna_48bfnTKU,9008
+h5py/tests/test_attribute_create.py,sha256=RjAa1cgcbF7aU9-a_VTwtGMkuNmgNARWhTkXZxtnpRk,2865
+h5py/tests/test_attrs.py,sha256=qw6lZNvLS8_SJ9uc-Qlvv0a37l8opju5CvOv2A_ISRI,9577
+h5py/tests/test_attrs_data.py,sha256=eSERwn8b21rs_r0cabv7U6_OXXRD4fTbIvfN-jwWgZk,7646
+h5py/tests/test_base.py,sha256=OXvSeVI2kBSz2mGhjHQiHFiYSNWeHXd1Rs99xUs7cRA,3816
+h5py/tests/test_big_endian_file.py,sha256=y8JZM9jlzaI9qbWEl7NoFY5l0idIiiXqMSGAqgSqEMA,1445
+h5py/tests/test_completions.py,sha256=J1wbtCmv-9uBETTLM3YK58Q3vhNiVmf9kaIK5BQa4RU,1473
+h5py/tests/test_dataset.py,sha256=q9g6h0yg4ZH3-_BzTfk2R6ojulsP2EUjIMPmrZRqpJs,68867
+h5py/tests/test_dataset_getitem.py,sha256=a9FAMTdgZaYHCi-IYzH1Yl1Py4EN_yxAhj5xYbM6BHo,18885
+h5py/tests/test_dataset_swmr.py,sha256=NBUJZXdq9QxDrt_c0J0by9OF-nvTmC3a2FvpFBpIIko,5825
+h5py/tests/test_datatype.py,sha256=hZGEOmvAR0BoAp2LS_rem_HVcPLS_nhwzW35EXvN3Yc,1007
+h5py/tests/test_dimension_scales.py,sha256=ZmoM7rJ4fIPs-oKvB0dG1xc3WkAR4izyqJ621xSEmRA,7942
+h5py/tests/test_dims_dimensionproxy.py,sha256=avqG5glTwLDBI2HaFtiP6uoOI40XSEFdGjoAGxV0uLE,601
+h5py/tests/test_dtype.py,sha256=CmUf7pl-zzdQoxyabqF9uRcwoQiV0Iu2msEtdcMF1Iw,17927
+h5py/tests/test_errors.py,sha256=ero4th_0YJBjXsepwfO6lwXN_tC6D_CRyBzdrXWT-5M,2247
+h5py/tests/test_file.py,sha256=NpOaEe7082fjGEit2l9m5NiA12lQH002MnoDrbEtJ-k,33263
+h5py/tests/test_file2.py,sha256=NWdxdOJZXCWtCNu-iyDd66GbuJeZr23nzBdh5MC5sso,8854
+h5py/tests/test_file_alignment.py,sha256=_TpIq2HRVPtiq_6j5UI_XH82B6uaiahAT8hoetaB04A,4375
+h5py/tests/test_file_image.py,sha256=YbdO_a0PzFpH3rrrsuHzDqDA9dsjM11i5GdX81i185Q,1524
+h5py/tests/test_filters.py,sha256=3SQ6APYFioOtYLfDW2iFIju1b4P_CjCQgX1GuSNO44c,3302
+h5py/tests/test_group.py,sha256=D-0fVUzZh3KDgfhJGCadGxAJfAEebGqyJbbqiZ_4Suk,35178
+h5py/tests/test_h5.py,sha256=FlYZcVxxP1kIYCZdSEC3sR7o72dPWYiUbhyFO0rvN-8,1216
+h5py/tests/test_h5d_direct_chunk.py,sha256=1O2n_u4sGBrkbU2Oh2u7CiA6lopvOMp36atmooBu0KY,5358
+h5py/tests/test_h5f.py,sha256=TvSSYw2GzFE5GT1TADCV6Y6X_If-llsuQBG2_2tecCQ,4018
+h5py/tests/test_h5o.py,sha256=wbDg6o-kzCQKi97Gt0IMBLgcVmNYw59EyESwcSIYaDE,508
+h5py/tests/test_h5p.py,sha256=KePE7gE26k1JIOLAmAvJcPUR6PSpuWKpw1L_IULmbzQ,7265
+h5py/tests/test_h5pl.py,sha256=L-L48S7EqCp91L0mjnyMSBxZV87wFVzoHhZFYZ2NoyY,1999
+h5py/tests/test_h5t.py,sha256=bnLA1w_57KaJcMHEGabrizZ3kU8f-aY-2wjQMWuqLow,6582
+h5py/tests/test_objects.py,sha256=YgsDBE6bMESEm2iSZ9o7P8cKp3LoLXl6ypa70pz7jQg,911
+h5py/tests/test_selections.py,sha256=97gvtofB1MoZZHPuVa4oaoSBlESWwi6l9FQxfjNd8Ys,4564
+h5py/tests/test_slicing.py,sha256=7oPhRU7OY8NtElXI1Pdc9uGZKAz457reFLag2s1RDdQ,13874
+h5py/tests/test_vds/__init__.py,sha256=2mnHIhOAdjMVjHQjBp9VFDLfltPEOR02aoZ1yTJU5hY,103
+h5py/tests/test_vds/__pycache__/__init__.cpython-38.pyc,,
+h5py/tests/test_vds/__pycache__/test_highlevel_vds.cpython-38.pyc,,
+h5py/tests/test_vds/__pycache__/test_lowlevel_vds.cpython-38.pyc,,
+h5py/tests/test_vds/__pycache__/test_virtual_source.cpython-38.pyc,,
+h5py/tests/test_vds/test_highlevel_vds.py,sha256=aMeJ6GFIoJ_ReAALEjVbT4Wnski4F0p9dO-uvvzbIEw,17262
+h5py/tests/test_vds/test_lowlevel_vds.py,sha256=91k0PwgsJ5-mUP0NRBaSi3s1TKwlOyPyeTj8KgN1QDg,12319
+h5py/tests/test_vds/test_virtual_source.py,sha256=s9EJYk8fWayEeXESiGAkPinfxiPWc3UZvaPUF9Fu3-Q,5932
+h5py/utils.cpython-38-x86_64-linux-gnu.so,sha256=GlOwilAi_LW5dOhfpkgVCAtOCpSGtiebYmBrTuyNG6s,129880
+h5py/version.py,sha256=9dh1j-XgAnkYdMKhhRzReZW2TRn4-qhh-jQ3OEp-MN8,1910
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..8a4261f21f26b59ae1e7766e92860facbe5eaee6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: false
+Tag: cp38-cp38-manylinux_2_12_x86_64
+Tag: cp38-cp38-manylinux2010_x86_64
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c5a4eac431789a52d7f7d521a84937511cdc400c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py-3.7.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+h5py
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..6446805d7dc9e5806e49e2b883137d22d1223ae5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/METADATA
@@ -0,0 +1,236 @@
+Metadata-Version: 2.1
+Name: idna
+Version: 3.3
+Summary: Internationalized Domain Names in Applications (IDNA)
+Home-page: https://github.com/kjd/idna
+Author: Kim Davies
+Author-email: kim@cynosure.com.au
+License: BSD-3-Clause
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: Name Service (DNS)
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Utilities
+Requires-Python: >=3.5
+License-File: LICENSE.md
+
+Internationalized Domain Names in Applications (IDNA)
+=====================================================
+
+Support for the Internationalised Domain Names in Applications
+(IDNA) protocol as specified in `RFC 5891 <https://tools.ietf.org/html/rfc5891>`_.
+This is the latest version of the protocol and is sometimes referred to as
+“IDNA 2008”.
+
+This library also provides support for Unicode Technical Standard 46,
+`Unicode IDNA Compatibility Processing <https://unicode.org/reports/tr46/>`_.
+
+This acts as a suitable replacement for the “encodings.idna” module that
+comes with the Python standard library, but which only supports the
+older superseded IDNA specification (`RFC 3490 <https://tools.ietf.org/html/rfc3490>`_).
+
+Basic functions are simply executed:
+
+.. code-block:: pycon
+
+ >>> import idna
+ >>> idna.encode('ドメイン.テスト')
+ b'xn--eckwd4c7c.xn--zckzah'
+ >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
+ ドメイン.テスト
+
+
+Installation
+------------
+
+To install this library, you can use pip:
+
+.. code-block:: bash
+
+ $ pip install idna
+
+Alternatively, you can install the package using the bundled setup script:
+
+.. code-block:: bash
+
+ $ python setup.py install
+
+
+Usage
+-----
+
+For typical usage, the ``encode`` and ``decode`` functions will take a domain
+name argument and perform a conversion to A-labels or U-labels respectively.
+
+.. code-block:: pycon
+
+ >>> import idna
+ >>> idna.encode('ドメイン.テスト')
+ b'xn--eckwd4c7c.xn--zckzah'
+ >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
+ ドメイン.テスト
+
+You may use the codec encoding and decoding methods using the
+``idna.codec`` module:
+
+.. code-block:: pycon
+
+ >>> import idna.codec
+ >>> print('домен.испытание'.encode('idna'))
+ b'xn--d1acufc.xn--80akhbyknj4f'
+ >>> print(b'xn--d1acufc.xn--80akhbyknj4f'.decode('idna'))
+ домен.испытание
+
+Conversions can be applied at a per-label basis using the ``ulabel`` or ``alabel``
+functions if necessary:
+
+.. code-block:: pycon
+
+ >>> idna.alabel('测试')
+ b'xn--0zwm56d'
+
+Compatibility Mapping (UTS #46)
++++++++++++++++++++++++++++++++
+
+As described in `RFC 5895 <https://tools.ietf.org/html/rfc5895>`_, the IDNA
+specification does not normalize input from different potential ways a user
+may input a domain name. This functionality, known as a “mapping”, is
+considered by the specification to be a local user-interface issue distinct
+from IDNA conversion functionality.
+
+This library provides one such mapping, that was developed by the Unicode
+Consortium. Known as `Unicode IDNA Compatibility Processing `_,
+it provides for both a regular mapping for typical applications, as well as
+a transitional mapping to help migrate from older IDNA 2003 applications.
+
+For example, “Königsgäßchen” is not a permissible label as *LATIN CAPITAL
+LETTER K* is not allowed (nor are capital letters in general). UTS 46 will
+convert this into lower case prior to applying the IDNA conversion.
+
+.. code-block:: pycon
+
+ >>> import idna
+ >>> idna.encode('Königsgäßchen')
+ ...
+ idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed
+ >>> idna.encode('Königsgäßchen', uts46=True)
+ b'xn--knigsgchen-b4a3dun'
+ >>> print(idna.decode('xn--knigsgchen-b4a3dun'))
+ königsgäßchen
+
+Transitional processing provides conversions to help transition from the older
+2003 standard to the current standard. For example, in the original IDNA
+specification, the *LATIN SMALL LETTER SHARP S* (ß) was converted into two
+*LATIN SMALL LETTER S* (ss), whereas in the current IDNA specification this
+conversion is not performed.
+
+.. code-block:: pycon
+
+ >>> idna.encode('Königsgäßchen', uts46=True, transitional=True)
+ 'xn--knigsgsschen-lcb0w'
+
+Implementors should use transitional processing with caution, only in rare
+cases where conversion from legacy labels to current labels must be performed
+(i.e. IDNA implementations that pre-date 2008). For typical applications
+that just need to convert labels, transitional processing is unlikely to be
+beneficial and could produce unexpected incompatible results.
+
+``encodings.idna`` Compatibility
+++++++++++++++++++++++++++++++++
+
+Function calls from the Python built-in ``encodings.idna`` module are
+mapped to their IDNA 2008 equivalents using the ``idna.compat`` module.
+Simply substitute the ``import`` clause in your code to refer to the
+new module name.
+
+Exceptions
+----------
+
+All errors raised during the conversion following the specification should
+raise an exception derived from the ``idna.IDNAError`` base class.
+
+More specific exceptions that may be generated as ``idna.IDNABidiError``
+when the error reflects an illegal combination of left-to-right and
+right-to-left characters in a label; ``idna.InvalidCodepoint`` when
+a specific codepoint is an illegal character in an IDN label (i.e.
+INVALID); and ``idna.InvalidCodepointContext`` when the codepoint is
+illegal based on its positional context (i.e. it is CONTEXTO or CONTEXTJ
+but the contextual requirements are not satisfied.)
+
+Building and Diagnostics
+------------------------
+
+The IDNA and UTS 46 functionality relies upon pre-calculated lookup
+tables for performance. These tables are derived from computing against
+eligibility criteria in the respective standards. These tables are
+computed using the command-line script ``tools/idna-data``.
+
+This tool will fetch relevant codepoint data from the Unicode repository
+and perform the required calculations to identify eligibility. There are
+three main modes:
+
+* ``idna-data make-libdata``. Generates ``idnadata.py`` and ``uts46data.py``,
+ the pre-calculated lookup tables using for IDNA and UTS 46 conversions. Implementors
+ who wish to track this library against a different Unicode version may use this tool
+ to manually generate a different version of the ``idnadata.py`` and ``uts46data.py``
+ files.
+
+* ``idna-data make-table``. Generate a table of the IDNA disposition
+ (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix B.1 of RFC
+ 5892 and the pre-computed tables published by `IANA `_.
+
+* ``idna-data U+0061``. Prints debugging output on the various properties
+ associated with an individual Unicode codepoint (in this case, U+0061), that are
+ used to assess the IDNA and UTS 46 status of a codepoint. This is helpful in debugging
+ or analysis.
+
+The tool accepts a number of arguments, described using ``idna-data -h``. Most notably,
+the ``--version`` argument allows the specification of the version of Unicode to use
+in computing the table data. For example, ``idna-data --version 9.0.0 make-libdata``
+will generate library data against Unicode 9.0.0.
+
+
+Additional Notes
+----------------
+
+* **Packages**. The latest tagged release version is published in the
+ `Python Package Index `_.
+
+* **Version support**. This library supports Python 3.5 and higher. As this library
+ serves as a low-level toolkit for a variety of applications, many of which strive
+ for broad compatibility with older Python versions, there is no rush to remove
+ older interpreter support. Removing support for older versions should be well
+ justified in that the maintenance burden has become too high.
+
+* **Python 2**. Python 2 is supported by version 2.x of this library. While active
+ development of the version 2.x series has ended, notable issues being corrected
+ may be backported to 2.x. Use "idna<3" in your requirements file if you need this
+ library for a Python 2 application.
+
+* **Testing**. The library has a test suite based on each rule of the IDNA specification, as
+ well as tests that are provided as part of the Unicode Technical Standard 46,
+ `Unicode IDNA Compatibility Processing `_.
+
+* **Emoji**. It is an occasional request to support emoji domains in this library. Encoding
+ of symbols like emoji is expressly prohibited by the technical standard IDNA 2008 and
+ emoji domains are broadly phased out across the domain industry due to associated security
+ risks. For now, applications that need to support these non-compliant labels may
+ wish to consider trying the encode/decode operation in this library first, and then falling
+ back to using `encodings.idna`. See `the Github project `_
+ for more discussion.
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..7bfd54e243d405d5997aaddc0e727968e0b13bfe
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/RECORD
@@ -0,0 +1,24 @@
+idna-3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+idna-3.3.dist-info/LICENSE.md,sha256=otbk2UC9JNvnuWRc3hmpeSzFHbeuDVrNMBrIYMqj6DY,1523
+idna-3.3.dist-info/METADATA,sha256=BdqiAf8ou4x1nzIHp2_sDfXWjl7BrSUGpOeVzbYHQuQ,9765
+idna-3.3.dist-info/RECORD,,
+idna-3.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+idna-3.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+idna-3.3.dist-info/top_level.txt,sha256=jSag9sEDqvSPftxOQy-ABfGV_RSy7oFh4zZJpODV8k0,5
+idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849
+idna/__pycache__/__init__.cpython-38.pyc,,
+idna/__pycache__/codec.cpython-38.pyc,,
+idna/__pycache__/compat.cpython-38.pyc,,
+idna/__pycache__/core.cpython-38.pyc,,
+idna/__pycache__/idnadata.cpython-38.pyc,,
+idna/__pycache__/intranges.cpython-38.pyc,,
+idna/__pycache__/package_data.cpython-38.pyc,,
+idna/__pycache__/uts46data.cpython-38.pyc,,
+idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374
+idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321
+idna/core.py,sha256=RFIkY-HhFZaDoBEFjGwyGd_vWI04uOAQjnzueMWqwOU,12795
+idna/idnadata.py,sha256=fzMzkCea2xieVxcrjngJ-2pLsKQNejPCZFlBajIuQdw,44025
+idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881
+idna/package_data.py,sha256=szxQhV0ZD0nKJ84Kuobw3l8q4_KeCyXjFRdpwIpKZmw,21
+idna/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+idna/uts46data.py,sha256=o-D7V-a0fOLZNd7tvxof6MYfUd0TBZzE2bLR5XO67xU,204400
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c40472e6fc2723c6dfeb7c305fcf6763edeedf2c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/idna-3.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+idna
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_layoutgrid.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_layoutgrid.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b7b140f600b5c465ddaeeae52da8c0c7887571c
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_layoutgrid.py
@@ -0,0 +1,563 @@
+"""
+A layoutgrid is a nrows by ncols set of boxes, meant to be used by
+`._constrained_layout`, each box is analogous to a subplotspec element of
+a gridspec.
+
+Each box is defined by left[ncols], right[ncols], bottom[nrows] and top[nrows],
+and by two editable margins for each side. The main margin gets its value
+set by the size of ticklabels, titles, etc on each axes that is in the figure.
+The outer margin is the padding around the axes, and space for any
+colorbars.
+
+The "inner" widths and heights of these boxes are then constrained to be the
+same (relative the values of `width_ratios[ncols]` and `height_ratios[nrows]`).
+
+The layoutgrid is then constrained to be contained within a parent layoutgrid,
+its column(s) and row(s) specified when it is created.
+"""
+
+import itertools
+import kiwisolver as kiwi
+import logging
+import numpy as np
+from matplotlib.transforms import Bbox
+
+_log = logging.getLogger(__name__)
+
+
+class LayoutGrid:
+ """
+ Analogous to a gridspec, and contained in another LayoutGrid.
+ """
+
+ def __init__(self, parent=None, parent_pos=(0, 0),
+ parent_inner=False, name='', ncols=1, nrows=1,
+ h_pad=None, w_pad=None, width_ratios=None,
+ height_ratios=None):
+ Variable = kiwi.Variable
+ self.parent = parent
+ self.parent_pos = parent_pos
+ self.parent_inner = parent_inner
+ self.name = name + seq_id()
+ if parent is not None:
+ self.name = f'{parent.name}.{self.name}'
+ self.nrows = nrows
+ self.ncols = ncols
+ self.height_ratios = np.atleast_1d(height_ratios)
+ if height_ratios is None:
+ self.height_ratios = np.ones(nrows)
+ self.width_ratios = np.atleast_1d(width_ratios)
+ if width_ratios is None:
+ self.width_ratios = np.ones(ncols)
+
+ sn = self.name + '_'
+ if parent is None:
+ self.parent = None
+ self.solver = kiwi.Solver()
+ else:
+ self.parent = parent
+ parent.add_child(self, *parent_pos)
+ self.solver = self.parent.solver
+ # keep track of artist associated w/ this layout. Can be none
+ self.artists = np.empty((nrows, ncols), dtype=object)
+ self.children = np.empty((nrows, ncols), dtype=object)
+
+ self.margins = {}
+ self.margin_vals = {}
+ # all the boxes in each column share the same left/right margins:
+ for todo in ['left', 'right', 'leftcb', 'rightcb']:
+ # track the value so we can change only if a margin is larger
+ # than the current value
+ self.margin_vals[todo] = np.zeros(ncols)
+
+ sol = self.solver
+
+ # These are redundant, but make life easier if
+ # we define them all. All that is really
+ # needed is left/right, margin['left'], and margin['right']
+ self.widths = [Variable(f'{sn}widths[{i}]') for i in range(ncols)]
+ self.lefts = [Variable(f'{sn}lefts[{i}]') for i in range(ncols)]
+ self.rights = [Variable(f'{sn}rights[{i}]') for i in range(ncols)]
+ self.inner_widths = [Variable(f'{sn}inner_widths[{i}]')
+ for i in range(ncols)]
+ for todo in ['left', 'right', 'leftcb', 'rightcb']:
+ self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
+ for i in range(ncols)]
+ for i in range(ncols):
+ sol.addEditVariable(self.margins[todo][i], 'strong')
+
+ for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
+ self.margins[todo] = np.empty((nrows), dtype=object)
+ self.margin_vals[todo] = np.zeros(nrows)
+
+ self.heights = [Variable(f'{sn}heights[{i}]') for i in range(nrows)]
+ self.inner_heights = [Variable(f'{sn}inner_heights[{i}]')
+ for i in range(nrows)]
+ self.bottoms = [Variable(f'{sn}bottoms[{i}]') for i in range(nrows)]
+ self.tops = [Variable(f'{sn}tops[{i}]') for i in range(nrows)]
+ for todo in ['bottom', 'top', 'bottomcb', 'topcb']:
+ self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]')
+ for i in range(nrows)]
+ for i in range(nrows):
+ sol.addEditVariable(self.margins[todo][i], 'strong')
+
+ # set these margins to zero by default. They will be edited as
+ # children are filled.
+ self.reset_margins()
+ self.add_constraints()
+
+ self.h_pad = h_pad
+ self.w_pad = w_pad
+
+ def __repr__(self):
+ str = f'LayoutBox: {self.name:25s} {self.nrows}x{self.ncols},\n'
+ for i in range(self.nrows):
+ for j in range(self.ncols):
+ str += f'{i}, {j}: '\
+ f'L({self.lefts[j].value():1.3f}, ' \
+ f'B{self.bottoms[i].value():1.3f}, ' \
+ f'W{self.widths[j].value():1.3f}, ' \
+ f'H{self.heights[i].value():1.3f}, ' \
+ f'innerW{self.inner_widths[j].value():1.3f}, ' \
+ f'innerH{self.inner_heights[i].value():1.3f}, ' \
+ f'ML{self.margins["left"][j].value():1.3f}, ' \
+ f'MR{self.margins["right"][j].value():1.3f}, \n'
+ return str
+
+ def reset_margins(self):
+ """
+ Reset all the margins to zero. Must do this after changing
+ figure size, for instance, because the relative size of the
+ axes labels etc changes.
+ """
+ for todo in ['left', 'right', 'bottom', 'top',
+ 'leftcb', 'rightcb', 'bottomcb', 'topcb']:
+ self.edit_margins(todo, 0.0)
+
+ def add_constraints(self):
+ # define self-consistent constraints
+ self.hard_constraints()
+ # define relationship with parent layoutgrid:
+ self.parent_constraints()
+ # define relative widths of the grid cells to each other
+ # and stack horizontally and vertically.
+ self.grid_constraints()
+
+ def hard_constraints(self):
+ """
+ These are the redundant constraints, plus ones that make the
+ rest of the code easier.
+ """
+ for i in range(self.ncols):
+ hc = [self.rights[i] >= self.lefts[i],
+ (self.rights[i] - self.margins['right'][i] -
+ self.margins['rightcb'][i] >=
+ self.lefts[i] - self.margins['left'][i] -
+ self.margins['leftcb'][i])
+ ]
+ for c in hc:
+ self.solver.addConstraint(c | 'required')
+
+ for i in range(self.nrows):
+ hc = [self.tops[i] >= self.bottoms[i],
+ (self.tops[i] - self.margins['top'][i] -
+ self.margins['topcb'][i] >=
+ self.bottoms[i] - self.margins['bottom'][i] -
+ self.margins['bottomcb'][i])
+ ]
+ for c in hc:
+ self.solver.addConstraint(c | 'required')
+
+ def add_child(self, child, i=0, j=0):
+ # np.ix_ returns the cross product of i and j indices
+ self.children[np.ix_(np.atleast_1d(i), np.atleast_1d(j))] = child
+
    def parent_constraints(self):
        """
        Constrain this grid's outer edges to its parent.

        A parentless grid spans the whole figure in normalized coordinates
        ([0, 1] both ways).  Otherwise, the outer edges coincide with the
        parent's cells selected by ``parent_pos``, optionally inset by the
        parent's margins when ``parent_inner`` is set.
        """
        # constraints that are due to the parent...
        # i.e. the first column's left is equal to the
        # parent's left, the last column right equal to the
        # parent's right...
        parent = self.parent
        if parent is None:
            hc = [self.lefts[0] == 0,
                  self.rights[-1] == 1,
                  # top and bottom reversed order...
                  self.tops[0] == 1,
                  self.bottoms[-1] == 0]
        else:
            # parent_pos may be scalars or spans; normalize to 1D arrays.
            rows, cols = self.parent_pos
            rows = np.atleast_1d(rows)
            cols = np.atleast_1d(cols)

            left = parent.lefts[cols[0]]
            right = parent.rights[cols[-1]]
            top = parent.tops[rows[0]]
            bottom = parent.bottoms[rows[-1]]
            if self.parent_inner:
                # the layout grid is contained inside the inner
                # grid of the parent.
                left += parent.margins['left'][cols[0]]
                left += parent.margins['leftcb'][cols[0]]
                right -= parent.margins['right'][cols[-1]]
                right -= parent.margins['rightcb'][cols[-1]]
                top -= parent.margins['top'][rows[0]]
                top -= parent.margins['topcb'][rows[0]]
                bottom += parent.margins['bottom'][rows[-1]]
                bottom += parent.margins['bottomcb'][rows[-1]]
            hc = [self.lefts[0] == left,
                  self.rights[-1] == right,
                  # from top to bottom
                  self.tops[0] == top,
                  self.bottoms[-1] == bottom]
        # These edge attachments are not allowed to be violated.
        for c in hc:
            self.solver.addConstraint(c | 'required')
+
    def grid_constraints(self):
        """
        Add 'strong' constraints tying the cells of this grid together:
        inner widths follow ``width_ratios``, inner heights follow
        ``height_ratios``, and neighbouring cells abut exactly.
        """
        # constrain the ratio of the inner part of the grids
        # to be the same (relative to width_ratios)

        # constrain widths:
        # w is the inner (margin-adjusted) width of the first column.
        w = (self.rights[0] - self.margins['right'][0] -
             self.margins['rightcb'][0])
        w = (w - self.lefts[0] - self.margins['left'][0] -
             self.margins['leftcb'][0])
        # w0 is the per-ratio-unit width all other columns are scaled from.
        w0 = w / self.width_ratios[0]
        # from left to right
        for i in range(1, self.ncols):
            w = (self.rights[i] - self.margins['right'][i] -
                 self.margins['rightcb'][i])
            w = (w - self.lefts[i] - self.margins['left'][i] -
                 self.margins['leftcb'][i])
            c = (w == w0 * self.width_ratios[i])
            self.solver.addConstraint(c | 'strong')
            # constrain the grid cells to be directly next to each other.
            c = (self.rights[i - 1] == self.lefts[i])
            self.solver.addConstraint(c | 'strong')

        # constrain heights:
        h = self.tops[0] - self.margins['top'][0] - self.margins['topcb'][0]
        h = (h - self.bottoms[0] - self.margins['bottom'][0] -
             self.margins['bottomcb'][0])
        h0 = h / self.height_ratios[0]
        # from top to bottom:
        for i in range(1, self.nrows):
            h = (self.tops[i] - self.margins['top'][i] -
                 self.margins['topcb'][i])
            h = (h - self.bottoms[i] - self.margins['bottom'][i] -
                 self.margins['bottomcb'][i])
            c = (h == h0 * self.height_ratios[i])
            self.solver.addConstraint(c | 'strong')
            # constrain the grid cells to be directly above each other.
            c = (self.bottoms[i - 1] == self.tops[i])
            self.solver.addConstraint(c | 'strong')
+
+ # Margin editing: The margins are variable and meant to
+ # contain things of a fixed size like axes labels, tick labels, titles
+ # etc
+ def edit_margin(self, todo, size, cell):
+ """
+ Change the size of the margin for one cell.
+
+ Parameters
+ ----------
+ todo : string (one of 'left', 'right', 'bottom', 'top')
+ margin to alter.
+
+ size : float
+ Size of the margin. If it is larger than the existing minimum it
+ updates the margin size. Fraction of figure size.
+
+ cell : int
+ Cell column or row to edit.
+ """
+ self.solver.suggestValue(self.margins[todo][cell], size)
+ self.margin_vals[todo][cell] = size
+
+ def edit_margin_min(self, todo, size, cell=0):
+ """
+ Change the minimum size of the margin for one cell.
+
+ Parameters
+ ----------
+ todo : string (one of 'left', 'right', 'bottom', 'top')
+ margin to alter.
+
+ size : float
+ Minimum size of the margin . If it is larger than the
+ existing minimum it updates the margin size. Fraction of
+ figure size.
+
+ cell : int
+ Cell column or row to edit.
+ """
+
+ if size > self.margin_vals[todo][cell]:
+ self.edit_margin(todo, size, cell)
+
+ def edit_margins(self, todo, size):
+ """
+ Change the size of all the margin of all the cells in the layout grid.
+
+ Parameters
+ ----------
+ todo : string (one of 'left', 'right', 'bottom', 'top')
+ margin to alter.
+
+ size : float
+ Size to set the margins. Fraction of figure size.
+ """
+
+ for i in range(len(self.margin_vals[todo])):
+ self.edit_margin(todo, size, i)
+
+ def edit_all_margins_min(self, todo, size):
+ """
+ Change the minimum size of all the margin of all
+ the cells in the layout grid.
+
+ Parameters
+ ----------
+ todo : {'left', 'right', 'bottom', 'top'}
+ The margin to alter.
+
+ size : float
+ Minimum size of the margin. If it is larger than the
+ existing minimum it updates the margin size. Fraction of
+ figure size.
+ """
+
+ for i in range(len(self.margin_vals[todo])):
+ self.edit_margin_min(todo, size, i)
+
+ def edit_outer_margin_mins(self, margin, ss):
+ """
+ Edit all four margin minimums in one statement.
+
+ Parameters
+ ----------
+ margin : dict
+ size of margins in a dict with keys 'left', 'right', 'bottom',
+ 'top'
+
+ ss : SubplotSpec
+ defines the subplotspec these margins should be applied to
+ """
+
+ self.edit_margin_min('left', margin['left'], ss.colspan.start)
+ self.edit_margin_min('leftcb', margin['leftcb'], ss.colspan.start)
+ self.edit_margin_min('right', margin['right'], ss.colspan.stop - 1)
+ self.edit_margin_min('rightcb', margin['rightcb'], ss.colspan.stop - 1)
+ # rows are from the top down:
+ self.edit_margin_min('top', margin['top'], ss.rowspan.start)
+ self.edit_margin_min('topcb', margin['topcb'], ss.rowspan.start)
+ self.edit_margin_min('bottom', margin['bottom'], ss.rowspan.stop - 1)
+ self.edit_margin_min('bottomcb', margin['bottomcb'],
+ ss.rowspan.stop - 1)
+
+ def get_margins(self, todo, col):
+ """Return the margin at this position"""
+ return self.margin_vals[todo][col]
+
+ def get_outer_bbox(self, rows=0, cols=0):
+ """
+ Return the outer bounding box of the subplot specs
+ given by rows and cols. rows and cols can be spans.
+ """
+ rows = np.atleast_1d(rows)
+ cols = np.atleast_1d(cols)
+
+ bbox = Bbox.from_extents(
+ self.lefts[cols[0]].value(),
+ self.bottoms[rows[-1]].value(),
+ self.rights[cols[-1]].value(),
+ self.tops[rows[0]].value())
+ return bbox
+
    def get_inner_bbox(self, rows=0, cols=0):
        """
        Return the inner bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # The inner box is the outer box shrunk by both the decoration
        # margins and the colorbar ('*cb') margins on every side.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value() +
             self.margins['left'][cols[0]].value() +
             self.margins['leftcb'][cols[0]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottom'][rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()),
            (self.rights[cols[-1]].value() -
             self.margins['right'][cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['top'][rows[0]].value() -
             self.margins['topcb'][rows[0]].value())
        )
        return bbox
+
    def get_bbox_for_cb(self, rows=0, cols=0):
        """
        Return the bounding box that includes the
        decorations but *not* the colorbar.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # Only the colorbar ('*cb') margins are stripped; the ordinary
        # decoration margins remain inside the returned box.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value() +
             self.margins['leftcb'][cols[0]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()),
            (self.rights[cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value())
        )
        return bbox
+
    def get_left_margin_bbox(self, rows=0, cols=0):
        """
        Return the left margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # Horizontally spans from just inside the left colorbar margin to
        # the inner edge; vertically spans the full outer cell height.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value() +
             self.margins['leftcb'][cols[0]].value()),
            (self.bottoms[rows[-1]].value()),
            (self.lefts[cols[0]].value() +
             self.margins['leftcb'][cols[0]].value() +
             self.margins['left'][cols[0]].value()),
            (self.tops[rows[0]].value()))
        return bbox
+
    def get_bottom_margin_bbox(self, rows=0, cols=0):
        """
        Return the bottom margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # Vertically spans from just above the bottom colorbar margin to
        # the inner edge; horizontally spans the full outer cell width.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()),
            (self.rights[cols[-1]].value()),
            (self.bottoms[rows[-1]].value() +
             self.margins['bottom'][rows[-1]].value() +
             self.margins['bottomcb'][rows[-1]].value()
             ))
        return bbox
+
    def get_right_margin_bbox(self, rows=0, cols=0):
        """
        Return the right margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # Horizontally spans from the inner edge to just inside the right
        # colorbar margin; vertically spans the full outer cell height.
        bbox = Bbox.from_extents(
            (self.rights[cols[-1]].value() -
             self.margins['right'][cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.bottoms[rows[-1]].value()),
            (self.rights[cols[-1]].value() -
             self.margins['rightcb'][cols[-1]].value()),
            (self.tops[rows[0]].value()))
        return bbox
+
    def get_top_margin_bbox(self, rows=0, cols=0):
        """
        Return the top margin bounding box of the subplot specs
        given by rows and cols.  rows and cols can be spans.
        """
        rows = np.atleast_1d(rows)
        cols = np.atleast_1d(cols)

        # Vertically spans from just below the top colorbar margin down to
        # the inner edge; horizontally spans the full outer cell width.
        bbox = Bbox.from_extents(
            (self.lefts[cols[0]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value()),
            (self.rights[cols[-1]].value()),
            (self.tops[rows[0]].value() -
             self.margins['topcb'][rows[0]].value() -
             self.margins['top'][rows[0]].value()))
        return bbox
+
+ def update_variables(self):
+ """
+ Update the variables for the solver attached to this layoutgrid.
+ """
+ self.solver.updateVariables()
+
# Module-wide counter backing seq_id(); never reset.
_layoutboxobjnum = itertools.count()


def seq_id():
    """Generate a short sequential id for layoutbox objects."""
    return format(next(_layoutboxobjnum), '06d')
+
+
+def print_children(lb):
+ """Print the children of the layoutbox."""
+ for child in lb.children:
+ print_children(child)
+
+
def plot_children(fig, lg=None, level=0, printit=False):
    """
    Simple plotting to show where boxes are.

    Draws, for every cell of *lg* (default: the figure's root layoutgrid),
    the outer box, the inner box, and the four margin boxes as rectangles
    on *fig*, then recurses into child layoutgrids.

    NOTE(review): *printit* is currently unused.
    """
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches

    if lg is None:
        # Run constrained layout so the solver variables hold fresh values.
        _layoutgrids = fig.execute_constrained_layout()
        lg = _layoutgrids[fig]
    # One color of the prop cycle per nesting level.
    # NOTE(review): assumes nesting depth < length of the color cycle,
    # otherwise colors[level] raises IndexError — confirm for deep grids.
    colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    col = colors[level]
    for i in range(lg.nrows):
        for j in range(lg.ncols):
            # Outer box: light grey filled rectangle behind everything.
            bb = lg.get_outer_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(bb.p0, bb.width, bb.height, linewidth=1,
                                   edgecolor='0.7', facecolor='0.7',
                                   alpha=0.2, transform=fig.transFigure,
                                   zorder=-3))
            # Inner box: outlined in the level's cycle color.
            bbi = lg.get_inner_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=2,
                                   edgecolor=col, facecolor='none',
                                   transform=fig.transFigure, zorder=-2))

            # The four margin boxes, each with a distinct translucent tint.
            bbi = lg.get_left_margin_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
                                   edgecolor='none', alpha=0.2,
                                   facecolor=[0.5, 0.7, 0.5],
                                   transform=fig.transFigure, zorder=-2))
            bbi = lg.get_right_margin_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
                                   edgecolor='none', alpha=0.2,
                                   facecolor=[0.7, 0.5, 0.5],
                                   transform=fig.transFigure, zorder=-2))
            bbi = lg.get_bottom_margin_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
                                   edgecolor='none', alpha=0.2,
                                   facecolor=[0.5, 0.5, 0.7],
                                   transform=fig.transFigure, zorder=-2))
            bbi = lg.get_top_margin_bbox(rows=i, cols=j)
            fig.add_artist(
                mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0,
                                   edgecolor='none', alpha=0.2,
                                   facecolor=[0.7, 0.2, 0.7],
                                   transform=fig.transFigure, zorder=-2))
    # Recurse into child layoutgrids; empty cells hold None.
    for ch in lg.children.flat:
        if ch is not None:
            plot_children(fig, ch, level=level+1)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_mathtext.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_mathtext.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea652ba916253e30470c43dbd5873dff6f28fd04
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_mathtext.py
@@ -0,0 +1,2942 @@
+"""
+Implementation details for :mod:`.mathtext`.
+"""
+
+from collections import namedtuple
+import enum
+import functools
+from io import StringIO
+import logging
+import os
+import types
+import unicodedata
+
+import numpy as np
+from pyparsing import (
+ Combine, Empty, Forward, Group, Literal, oneOf, OneOrMore,
+ Optional, ParseBaseException, ParseFatalException, ParserElement,
+ ParseResults, QuotedString, Regex, StringEnd, Suppress, White, ZeroOrMore)
+
+import matplotlib as mpl
+from . import _api, cbook
+from ._mathtext_data import (
+ latex_to_bakoma, latex_to_standard, stix_glyph_fixes, stix_virtual_fonts,
+ tex2uni)
+from .afm import AFM
+from .font_manager import FontProperties, findfont, get_font
+from .ft2font import KERNING_DEFAULT
+
+
+ParserElement.enablePackrat()
+_log = logging.getLogger("matplotlib.mathtext")
+
+
+##############################################################################
+# FONTS
+
+
def get_unicode_index(symbol, math=True):
    r"""
    Return the integer index (from the Unicode table) of *symbol*.

    Parameters
    ----------
    symbol : str
        A single unicode character, a TeX command (e.g. r'\pi') or a Type1
        symbol name (e.g. 'phi').
    math : bool, default: True
        If False, always treat as a single unicode character.
    """
    if not math:
        # Non-math text is taken literally as one character.
        return ord(symbol)
    # From UTF #25: U+2212 minus sign is the preferred representation of
    # the unary and binary minus sign rather than the ASCII-derived U+002D
    # hyphen-minus, because minus sign is unambiguous and because it is
    # rendered with a more desirable length, usually longer than a hyphen.
    if symbol == '-':
        return 0x2212
    try:
        # A single unicode character maps straight to its codepoint.
        return ord(symbol)
    except TypeError:
        # Multi-character string: fall through to the TeX-command lookup.
        pass
    try:
        # A TeX command such as \alpha: strip the backslash and look it up.
        return tex2uni[symbol.strip("\\")]
    except KeyError as err:
        raise ValueError(
            "'{}' is not a valid Unicode character or TeX/Type1 symbol"
            .format(symbol)) from err
+
+
class Fonts:
    """
    An abstract base class for a system of fonts to use for mathtext.

    The class must be able to take symbol keys and font file names and
    return the character metrics. It also delegates to a backend class
    to do the actual drawing.
    """

    def __init__(self, default_font_prop, mathtext_backend):
        """
        Parameters
        ----------
        default_font_prop : `~.font_manager.FontProperties`
            The default non-math font, or the base font for Unicode (generic)
            font rendering.
        mathtext_backend : `MathtextBackend` subclass
            Backend to which rendering is actually delegated.
        """
        self.default_font_prop = default_font_prop
        self.mathtext_backend = mathtext_backend
        # Maps font filename -> set of glyph codes rendered with it; used
        # by backends that subset fonts (see get_used_characters()).
        self.used_characters = {}

    @_api.deprecated("3.4")
    def destroy(self):
        """
        Fix any cyclical references before the object is about
        to be destroyed.
        """
        self.used_characters = None

    def get_kern(self, font1, fontclass1, sym1, fontsize1,
                 font2, fontclass2, sym2, fontsize2, dpi):
        """
        Get the kerning distance for font between *sym1* and *sym2*.

        See `~.Fonts.get_metrics` for a detailed description of the parameters.
        """
        # Base implementation: no kerning information is available.
        return 0.

    def get_metrics(self, font, font_class, sym, fontsize, dpi, math=True):
        r"""
        Parameters
        ----------
        font : str
            One of the TeX font names: "tt", "it", "rm", "cal", "sf", "bf",
            "default", "regular", "bb", "frak", "scr".  "default" and "regular"
            are synonyms and use the non-math font.
        font_class : str
            One of the TeX font names (as for *font*), but **not** "bb",
            "frak", or "scr".  This is used to combine two font classes.  The
            only supported combination currently is ``get_metrics("frak", "bf",
            ...)``.
        sym : str
            A symbol in raw TeX form, e.g., "1", "x", or "\sigma".
        fontsize : float
            Font size in points.
        dpi : float
            Rendering dots-per-inch.
        math : bool
            Whether we are currently in math mode or not.

        Returns
        -------
        object

            The returned object has the following attributes (all floats,
            except *slanted*):

            - *advance*: The advance distance (in points) of the glyph.
            - *height*: The height of the glyph in points.
            - *width*: The width of the glyph in points.
            - *xmin*, *xmax*, *ymin*, *ymax*: The ink rectangle of the glyph
            - *iceberg*: The distance from the baseline to the top of the
              glyph. (This corresponds to TeX's definition of "height".)
            - *slanted*: Whether the glyph should be considered as "slanted"
              (currently used for kerning sub/superscripts).
        """
        # _get_info is provided by concrete subclasses (e.g. TruetypeFonts).
        info = self._get_info(font, font_class, sym, fontsize, dpi, math)
        return info.metrics

    def set_canvas_size(self, w, h, d):
        """
        Set the size of the buffer used to render the math expression.
        Only really necessary for the bitmap backends.
        """
        self.width, self.height, self.depth = np.ceil([w, h, d])
        self.mathtext_backend.set_canvas_size(
            self.width, self.height, self.depth)

    @_api.rename_parameter("3.4", "facename", "font")
    def render_glyph(self, ox, oy, font, font_class, sym, fontsize, dpi):
        """
        At position (*ox*, *oy*), draw the glyph specified by the remaining
        parameters (see `get_metrics` for their detailed description).
        """
        info = self._get_info(font, font_class, sym, fontsize, dpi)
        # Record the glyph for font subsetting before delegating the draw.
        self.used_characters.setdefault(info.font.fname, set()).add(info.num)
        self.mathtext_backend.render_glyph(ox, oy, info)

    def render_rect_filled(self, x1, y1, x2, y2):
        """
        Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
        """
        self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)

    def get_xheight(self, font, fontsize, dpi):
        """
        Get the xheight for the given *font* and *fontsize*.
        """
        raise NotImplementedError()

    def get_underline_thickness(self, font, fontsize, dpi):
        """
        Get the line thickness that matches the given font.  Used as a
        base unit for drawing lines such as in a fraction or radical.
        """
        raise NotImplementedError()

    def get_used_characters(self):
        """
        Get the set of characters that were used in the math
        expression.  Used by backends that need to subset fonts so
        they know which glyphs to include.
        """
        return self.used_characters

    def get_results(self, box):
        """
        Get the data needed by the backend to render the math
        expression.  The return value is backend-specific.
        """
        result = self.mathtext_backend.get_results(
            box, self.get_used_characters())
        # If a subclass/user has overridden the deprecated destroy(), call
        # it once through the deprecation shim.
        # NOTE(review): this compares against TruetypeFonts.destroy even
        # though Fonts is the base class — presumably intentional for the
        # 3.4 deprecation window; confirm.
        if self.destroy != TruetypeFonts.destroy.__get__(self):
            destroy = _api.deprecate_method_override(
                __class__.destroy, self, since="3.4")
            if destroy:
                destroy()
        return result

    def get_sized_alternatives_for_symbol(self, fontname, sym):
        """
        Override if your font provides multiple sizes of the same
        symbol.  Should return a list of symbols matching *sym* in
        various sizes.  The expression renderer will select the most
        appropriate size for a given situation from this list.
        """
        return [(fontname, sym)]
+
+
class TruetypeFonts(Fonts):
    """
    A generic base class for all font setups that use Truetype fonts
    (through FT2Font).
    """
    def __init__(self, default_font_prop, mathtext_backend):
        super().__init__(default_font_prop, mathtext_backend)
        # Cache of glyph info, keyed by (fontname, font_class, sym,
        # fontsize, dpi); populated by _get_info.
        self.glyphd = {}
        # Cache of FT2Font objects, keyed by path, basename and
        # postscript name (see _get_font).
        self._fonts = {}

        filename = findfont(default_font_prop)
        default_font = get_font(filename)
        self._fonts['default'] = default_font
        self._fonts['regular'] = default_font

    @_api.deprecated("3.4")
    def destroy(self):
        self.glyphd = None
        super().destroy()

    def _get_font(self, font):
        """Return the (cached) FT2Font for *font* (TeX class or path)."""
        if font in self.fontmap:
            basename = self.fontmap[font]
        else:
            basename = font
        cached_font = self._fonts.get(basename)
        if cached_font is None and os.path.exists(basename):
            cached_font = get_font(basename)
            # Also cache under the postscript name(s) so later lookups by
            # either spelling hit the cache.
            self._fonts[basename] = cached_font
            self._fonts[cached_font.postscript_name] = cached_font
            self._fonts[cached_font.postscript_name.lower()] = cached_font
        return cached_font

    def _get_offset(self, font, glyph, fontsize, dpi):
        # NOTE(review): special-cases the cmex10 extension font, which
        # appears to need a vertical shift to sit on the baseline — confirm.
        if font.postscript_name == 'Cmex10':
            return (glyph.height / 64 / 2) + (fontsize/3 * dpi/72)
        return 0.

    def _get_info(self, fontname, font_class, sym, fontsize, dpi, math=True):
        """Load (or fetch from cache) the glyph and metrics for *sym*."""
        key = fontname, font_class, sym, fontsize, dpi
        bunch = self.glyphd.get(key)
        if bunch is not None:
            return bunch

        font, num, symbol_name, fontsize, slanted = \
            self._get_glyph(fontname, font_class, sym, fontsize, math)

        font.set_size(fontsize, dpi)
        glyph = font.load_char(
            num,
            flags=self.mathtext_backend.get_hinting_type())

        # FreeType metrics are in 26.6 fixed point (1/64 pixel).
        xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
        offset = self._get_offset(font, glyph, fontsize, dpi)
        metrics = types.SimpleNamespace(
            advance = glyph.linearHoriAdvance/65536.0,
            height = glyph.height/64.0,
            width = glyph.width/64.0,
            xmin = xmin,
            xmax = xmax,
            ymin = ymin+offset,
            ymax = ymax+offset,
            # iceberg is the equivalent of TeX's "height"
            iceberg = glyph.horiBearingY/64.0 + offset,
            slanted = slanted
            )

        result = self.glyphd[key] = types.SimpleNamespace(
            font = font,
            fontsize = fontsize,
            postscript_name = font.postscript_name,
            metrics = metrics,
            symbol_name = symbol_name,
            num = num,
            glyph = glyph,
            offset = offset
            )
        return result

    def get_xheight(self, fontname, fontsize, dpi):
        font = self._get_font(fontname)
        font.set_size(fontsize, dpi)
        pclt = font.get_sfnt_table('pclt')
        if pclt is None:
            # Some fonts don't store the xHeight, so we do a poor man's xHeight
            metrics = self.get_metrics(
                fontname, mpl.rcParams['mathtext.default'], 'x', fontsize, dpi)
            return metrics.iceberg
        xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
        return xHeight

    def get_underline_thickness(self, font, fontsize, dpi):
        # This function used to grab underline thickness from the font
        # metrics, but that information is just too un-reliable, so it
        # is now hardcoded.
        return ((0.75 / 12.0) * fontsize * dpi) / 72.0

    def get_kern(self, font1, fontclass1, sym1, fontsize1,
                 font2, fontclass2, sym2, fontsize2, dpi):
        # Kerning is only meaningful within one font at one size; defer to
        # the base class (which returns 0) otherwise.
        if font1 == font2 and fontsize1 == fontsize2:
            info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
            info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
            font = info1.font
            return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64
        return super().get_kern(font1, fontclass1, sym1, fontsize1,
                                font2, fontclass2, sym2, fontsize2, dpi)
+
+
class BakomaFonts(TruetypeFonts):
    """
    Use the Bakoma TrueType fonts for rendering.

    Symbols are strewn about a number of font files, each of which has
    its own proprietary 8-bit encoding.
    """
    # TeX font class -> BaKoMa (Computer Modern) font basename.
    _fontmap = {
        'cal': 'cmsy10',
        'rm': 'cmr10',
        'tt': 'cmtt10',
        'it': 'cmmi10',
        'bf': 'cmb10',
        'sf': 'cmss10',
        'ex': 'cmex10',
    }

    def __init__(self, *args, **kwargs):
        # Create the STIX fallback first so it is ready before any glyph
        # lookups happen.
        self._stix_fallback = StixFonts(*args, **kwargs)

        super().__init__(*args, **kwargs)
        self.fontmap = {}
        for key, val in self._fontmap.items():
            fullpath = findfont(val)
            # Register each font under both its TeX class and its basename.
            self.fontmap[key] = fullpath
            self.fontmap[val] = fullpath

    _slanted_symbols = set(r"\int \oint".split())

    def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
        """
        Resolve *sym* to ``(font, num, symbol_name, fontsize, slanted)``,
        deferring to the STIX fallback when the BaKoMa fonts lack a glyph.
        """
        symbol_name = None
        font = None
        if fontname in self.fontmap and sym in latex_to_bakoma:
            basename, num = latex_to_bakoma[sym]
            # cmmi10 is the italic math font; integral signs are slanted too.
            slanted = (basename == "cmmi10") or sym in self._slanted_symbols
            font = self._get_font(basename)
        elif len(sym) == 1:
            slanted = (fontname == "it")
            font = self._get_font(fontname)
            if font is not None:
                num = ord(sym)

        if font is not None:
            # A glyph id of 0 means "missing glyph" in TrueType.
            gid = font.get_char_index(num)
            if gid != 0:
                symbol_name = font.get_glyph_name(gid)

        if symbol_name is None:
            return self._stix_fallback._get_glyph(
                fontname, font_class, sym, fontsize, math)

        return font, num, symbol_name, fontsize, slanted

    # The Bakoma fonts contain many pre-sized alternatives for the
    # delimiters. The AutoSizedChar class will use these alternatives
    # and select the best (closest sized) glyph.
    _size_alternatives = {
        '(':           [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
                        ('ex', '\xb5'), ('ex', '\xc3')],
        ')':           [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
                        ('ex', '\xb6'), ('ex', '\x21')],
        '{':           [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
                        ('ex', '\xbd'), ('ex', '\x28')],
        '}':           [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
                        ('ex', '\xbe'), ('ex', '\x29')],
        # The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
        '[':           [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
                        ('ex', '\x22')],
        ']':           [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
                        ('ex', '\x23')],
        r'\lfloor':    [('ex', '\xa5'), ('ex', '\x6a'),
                        ('ex', '\xb9'), ('ex', '\x24')],
        r'\rfloor':    [('ex', '\xa6'), ('ex', '\x6b'),
                        ('ex', '\xba'), ('ex', '\x25')],
        r'\lceil':     [('ex', '\xa7'), ('ex', '\x6c'),
                        ('ex', '\xbb'), ('ex', '\x26')],
        r'\rceil':     [('ex', '\xa8'), ('ex', '\x6d'),
                        ('ex', '\xbc'), ('ex', '\x27')],
        r'\langle':    [('ex', '\xad'), ('ex', '\x44'),
                        ('ex', '\xbf'), ('ex', '\x2a')],
        r'\rangle':    [('ex', '\xae'), ('ex', '\x45'),
                        ('ex', '\xc0'), ('ex', '\x2b')],
        r'\__sqrt__':  [('ex', '\x70'), ('ex', '\x71'),
                        ('ex', '\x72'), ('ex', '\x73')],
        r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
                        ('ex', '\xc2'), ('ex', '\x2d')],
        r'/':          [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
                        ('ex', '\xcb'), ('ex', '\x2c')],
        r'\widehat':   [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
                        ('ex', '\x64')],
        r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
                        ('ex', '\x67')],
        r'<':          [('cal', 'h'), ('ex', 'D')],
        r'>':          [('cal', 'i'), ('ex', 'E')]
    }

    # Aliases sharing the sized alternatives of their plain-ASCII targets.
    # '\rightparen' is the correct spelling matching '\leftparen'; the
    # misspelled '\rightparent' is kept as well for backward compatibility.
    for alias, target in [(r'\leftparen', '('),
                          (r'\rightparen', ')'),
                          (r'\rightparent', ')'),
                          (r'\leftbrace', '{'),
                          (r'\rightbrace', '}'),
                          (r'\leftbracket', '['),
                          (r'\rightbracket', ']'),
                          (r'\{', '{'),
                          (r'\}', '}'),
                          (r'\[', '['),
                          (r'\]', ']')]:
        _size_alternatives[alias] = _size_alternatives[target]

    def get_sized_alternatives_for_symbol(self, fontname, sym):
        """Return the pre-sized variants of *sym*, if any."""
        return self._size_alternatives.get(sym, [(fontname, sym)])
+
+
class UnicodeFonts(TruetypeFonts):
    """
    An abstract base class for handling Unicode fonts.

    While some reasonably complete Unicode fonts (such as DejaVu) may
    work in some situations, the only Unicode font I'm aware of with a
    complete set of math symbols is STIX.

    This class will "fallback" on the Bakoma fonts when a required
    symbol can not be found in the font.
    """
    use_cmex = True  # Unused; delete once mathtext becomes private.

    def __init__(self, *args, **kwargs):
        # This must come first so the backend's owner is set correctly
        fallback_rc = mpl.rcParams['mathtext.fallback']
        font_cls = {'stix': StixFonts,
                    'stixsans': StixSansFonts,
                    'cm': BakomaFonts
                    }.get(fallback_rc)
        # cm_fallback is None when rcParams disable the fallback entirely.
        self.cm_fallback = font_cls(*args, **kwargs) if font_cls else None

        super().__init__(*args, **kwargs)
        self.fontmap = {}
        for texfont in "cal rm tt it bf sf".split():
            prop = mpl.rcParams['mathtext.' + texfont]
            font = findfont(prop)
            self.fontmap[texfont] = font
        prop = FontProperties('cmex10')
        font = findfont(prop)
        self.fontmap['ex'] = font

        # include STIX sized alternatives for glyphs if fallback is STIX
        if isinstance(self.cm_fallback, StixFonts):
            stixsizedaltfonts = {
                 0: 'STIXGeneral',
                 1: 'STIXSizeOneSym',
                 2: 'STIXSizeTwoSym',
                 3: 'STIXSizeThreeSym',
                 4: 'STIXSizeFourSym',
                 5: 'STIXSizeFiveSym'}

            for size, name in stixsizedaltfonts.items():
                fullpath = findfont(name)
                self.fontmap[size] = fullpath
                self.fontmap[name] = fullpath

    _slanted_symbols = set(r"\int \oint".split())

    def _map_virtual_font(self, fontname, font_class, uniindex):
        # Hook for subclasses (e.g. StixFonts) that embed whole "virtual"
        # fonts at special codepoints; identity mapping by default.
        return fontname, uniindex

    def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
        """Resolve *sym*, falling back to ``cm_fallback`` when missing."""
        try:
            uniindex = get_unicode_index(sym, math)
            found_symbol = True
        except ValueError:
            uniindex = ord('?')
            found_symbol = False
            _log.warning("No TeX to unicode mapping for {!a}.".format(sym))

        fontname, uniindex = self._map_virtual_font(
            fontname, font_class, uniindex)

        new_fontname = fontname

        # Only characters in the "Letter" class should be italicized in 'it'
        # mode.  Greek capital letters should be Roman.
        if found_symbol:
            if fontname == 'it' and uniindex < 0x10000:
                char = chr(uniindex)
                if (unicodedata.category(char)[0] != "L"
                        or unicodedata.name(char).startswith("GREEK CAPITAL")):
                    new_fontname = 'rm'

            slanted = (new_fontname == 'it') or sym in self._slanted_symbols
            # Reset: now verify the chosen font actually has the glyph.
            found_symbol = False
            font = self._get_font(new_fontname)
            if font is not None:
                if font.family_name == "cmr10" and uniindex == 0x2212:
                    # minus sign exists in cmsy10 (not cmr10)
                    font = get_font(
                        cbook._get_data_path("fonts/ttf/cmsy10.ttf"))
                    uniindex = 0xa1
                glyphindex = font.get_char_index(uniindex)
                if glyphindex != 0:
                    found_symbol = True

        if not found_symbol:
            if self.cm_fallback:
                if (fontname in ('it', 'regular')
                        and isinstance(self.cm_fallback, StixFonts)):
                    fontname = 'rm'

                g = self.cm_fallback._get_glyph(fontname, font_class,
                                                sym, fontsize)
                fname = g[0].family_name
                if fname in list(BakomaFonts._fontmap.values()):
                    fname = "Computer Modern"
                _log.info("Substituting symbol %s from %s", sym, fname)
                return g

            else:
                if (fontname in ('it', 'regular')
                        and isinstance(self, StixFonts)):
                    return self._get_glyph('rm', font_class, sym, fontsize)
                _log.warning("Font {!r} does not have a glyph for {!a} "
                             "[U+{:x}], substituting with a dummy "
                             "symbol.".format(new_fontname, sym, uniindex))
                fontname = 'rm'
                font = self._get_font(fontname)
                uniindex = 0xA4  # currency char, for lack of anything better
                glyphindex = font.get_char_index(uniindex)
                slanted = False

        symbol_name = font.get_glyph_name(glyphindex)
        return font, uniindex, symbol_name, fontsize, slanted

    def get_sized_alternatives_for_symbol(self, fontname, sym):
        # The fallback font set (Bakoma/STIX) knows about pre-sized glyphs.
        if self.cm_fallback:
            return self.cm_fallback.get_sized_alternatives_for_symbol(
                fontname, sym)
        return [(fontname, sym)]
+
+
class DejaVuFonts(UnicodeFonts):
    """
    Shared base for the DejaVu font sets; concrete subclasses
    (`DejaVuSerifFonts`, `DejaVuSansFonts`) supply ``_fontmap``.
    """
    use_cmex = False  # Unused; delete once mathtext becomes private.

    def __init__(self, *args, **kwargs):
        # This must come first so the backend's owner is set correctly
        if isinstance(self, DejaVuSerifFonts):
            self.cm_fallback = StixFonts(*args, **kwargs)
        else:
            self.cm_fallback = StixSansFonts(*args, **kwargs)
        # BaKoMa is kept around solely for the \prime override below.
        self.bakoma = BakomaFonts(*args, **kwargs)
        TruetypeFonts.__init__(self, *args, **kwargs)
        self.fontmap = {}
        # Include Stix sized alternatives for glyphs
        # NOTE(review): this mutates the subclass' class-level _fontmap on
        # instantiation, so the sizes are shared by all instances —
        # presumably intentional (idempotent), but confirm.
        self._fontmap.update({
            1: 'STIXSizeOneSym',
            2: 'STIXSizeTwoSym',
            3: 'STIXSizeThreeSym',
            4: 'STIXSizeFourSym',
            5: 'STIXSizeFiveSym',
        })
        for key, name in self._fontmap.items():
            fullpath = findfont(name)
            self.fontmap[key] = fullpath
            self.fontmap[name] = fullpath

    def _get_glyph(self, fontname, font_class, sym, fontsize, math=True):
        # Override prime symbol to use Bakoma.
        if sym == r'\prime':
            return self.bakoma._get_glyph(
                fontname, font_class, sym, fontsize, math)
        else:
            # check whether the glyph is available in the display font
            uniindex = get_unicode_index(sym)
            font = self._get_font('ex')
            if font is not None:
                glyphindex = font.get_char_index(uniindex)
                if glyphindex != 0:
                    return super()._get_glyph(
                        'ex', font_class, sym, fontsize, math)
            # otherwise return regular glyph
            return super()._get_glyph(
                fontname, font_class, sym, fontsize, math)
+
+
class DejaVuSerifFonts(DejaVuFonts):
    """
    A font handling class for the DejaVu Serif fonts

    If a glyph is not found it will fallback to Stix Serif
    """
    # TeX font class -> fontconfig pattern; 'ex' supplies the large
    # delimiter/operator glyphs, and the integer key 0 is the size-0 slot
    # for sized alternatives (sizes 1-5 are added in DejaVuFonts.__init__).
    _fontmap = {
        'rm': 'DejaVu Serif',
        'it': 'DejaVu Serif:italic',
        'bf': 'DejaVu Serif:weight=bold',
        'sf': 'DejaVu Sans',
        'tt': 'DejaVu Sans Mono',
        'ex': 'DejaVu Serif Display',
        0:    'DejaVu Serif',
    }
+
+
class DejaVuSansFonts(DejaVuFonts):
    """
    A font handling class for the DejaVu Sans fonts

    If a glyph is not found it will fallback to Stix Sans
    """
    # TeX font class -> fontconfig pattern; 'ex' supplies the large
    # delimiter/operator glyphs, and the integer key 0 is the size-0 slot
    # for sized alternatives (sizes 1-5 are added in DejaVuFonts.__init__).
    _fontmap = {
        'rm': 'DejaVu Sans',
        'it': 'DejaVu Sans:italic',
        'bf': 'DejaVu Sans:weight=bold',
        'sf': 'DejaVu Sans',
        'tt': 'DejaVu Sans Mono',
        'ex': 'DejaVu Sans Display',
        0:    'DejaVu Sans',
    }
+
+
+class StixFonts(UnicodeFonts):
+ """
+ A font handling class for the STIX fonts.
+
+ In addition to what UnicodeFonts provides, this class:
+
+ - supports "virtual fonts" which are complete alpha numeric
+ character sets with different font styles at special Unicode
+ code points, such as "Blackboard".
+
+ - handles sized alternative characters for the STIXSizeX fonts.
+ """
+ _fontmap = {
+ 'rm': 'STIXGeneral',
+ 'it': 'STIXGeneral:italic',
+ 'bf': 'STIXGeneral:weight=bold',
+ 'nonunirm': 'STIXNonUnicode',
+ 'nonuniit': 'STIXNonUnicode:italic',
+ 'nonunibf': 'STIXNonUnicode:weight=bold',
+ 0: 'STIXGeneral',
+ 1: 'STIXSizeOneSym',
+ 2: 'STIXSizeTwoSym',
+ 3: 'STIXSizeThreeSym',
+ 4: 'STIXSizeFourSym',
+ 5: 'STIXSizeFiveSym',
+ }
+ use_cmex = False # Unused; delete once mathtext becomes private.
+ cm_fallback = False
+ _sans = False
+
+ def __init__(self, *args, **kwargs):
+ TruetypeFonts.__init__(self, *args, **kwargs)
+ self.fontmap = {}
+ for key, name in self._fontmap.items():
+ fullpath = findfont(name)
+ self.fontmap[key] = fullpath
+ self.fontmap[name] = fullpath
+
+ def _map_virtual_font(self, fontname, font_class, uniindex):
+ # Handle these "fonts" that are actually embedded in
+ # other fonts.
+ mapping = stix_virtual_fonts.get(fontname)
+ if (self._sans and mapping is None
+ and fontname not in ('regular', 'default')):
+ mapping = stix_virtual_fonts['sf']
+ doing_sans_conversion = True
+ else:
+ doing_sans_conversion = False
+
+ if mapping is not None:
+ if isinstance(mapping, dict):
+ try:
+ mapping = mapping[font_class]
+ except KeyError:
+ mapping = mapping['rm']
+
+ # Binary search for the source glyph
+ lo = 0
+ hi = len(mapping)
+ while lo < hi:
+ mid = (lo+hi)//2
+ range = mapping[mid]
+ if uniindex < range[0]:
+ hi = mid
+ elif uniindex <= range[1]:
+ break
+ else:
+ lo = mid + 1
+
+ if range[0] <= uniindex <= range[1]:
+ uniindex = uniindex - range[0] + range[3]
+ fontname = range[2]
+ elif not doing_sans_conversion:
+ # This will generate a dummy character
+ uniindex = 0x1
+ fontname = mpl.rcParams['mathtext.default']
+
+ # Fix some incorrect glyphs.
+ if fontname in ('rm', 'it'):
+ uniindex = stix_glyph_fixes.get(uniindex, uniindex)
+
+ # Handle private use area glyphs
+ if fontname in ('it', 'rm', 'bf') and 0xe000 <= uniindex <= 0xf8ff:
+ fontname = 'nonuni' + fontname
+
+ return fontname, uniindex
+
+ @functools.lru_cache()
+ def get_sized_alternatives_for_symbol(self, fontname, sym):
+ fixes = {
+ '\\{': '{', '\\}': '}', '\\[': '[', '\\]': ']',
+ '<': '\N{MATHEMATICAL LEFT ANGLE BRACKET}',
+ '>': '\N{MATHEMATICAL RIGHT ANGLE BRACKET}',
+ }
+ sym = fixes.get(sym, sym)
+ try:
+ uniindex = get_unicode_index(sym)
+ except ValueError:
+ return [(fontname, sym)]
+ alternatives = [(i, chr(uniindex)) for i in range(6)
+ if self._get_font(i).get_char_index(uniindex) != 0]
+ # The largest size of the radical symbol in STIX has incorrect
+ # metrics that cause it to be disconnected from the stem.
+ if sym == r'\__sqrt__':
+ alternatives = alternatives[:-1]
+ return alternatives
+
+
+class StixSansFonts(StixFonts):
+ """
+ A font handling class for the STIX fonts (that uses sans-serif
+ characters by default).
+ """
+ _sans = True
+
+
+class StandardPsFonts(Fonts):
+ """
+ Use the standard postscript fonts for rendering to backend_ps
+
+ Unlike the other font classes, BakomaFont and UnicodeFont, this
+ one requires the Ps backend.
+ """
+ basepath = str(cbook._get_data_path('fonts/afm'))
+
+ fontmap = {
+ 'cal': 'pzcmi8a', # Zapf Chancery
+ 'rm': 'pncr8a', # New Century Schoolbook
+ 'tt': 'pcrr8a', # Courier
+ 'it': 'pncri8a', # New Century Schoolbook Italic
+ 'sf': 'phvr8a', # Helvetica
+ 'bf': 'pncb8a', # New Century Schoolbook Bold
+ None: 'psyr', # Symbol
+ }
+
+ def __init__(self, default_font_prop, mathtext_backend=None):
+ if mathtext_backend is None:
+ # Circular import, can be dropped after public access to
+ # StandardPsFonts is removed and mathtext_backend made a required
+ # parameter.
+ from . import mathtext
+ mathtext_backend = mathtext.MathtextBackendPath()
+ super().__init__(default_font_prop, mathtext_backend)
+ self.glyphd = {}
+ self.fonts = {}
+
+ filename = findfont(default_font_prop, fontext='afm',
+ directory=self.basepath)
+ if filename is None:
+ filename = findfont('Helvetica', fontext='afm',
+ directory=self.basepath)
+ with open(filename, 'rb') as fd:
+ default_font = AFM(fd)
+ default_font.fname = filename
+
+ self.fonts['default'] = default_font
+ self.fonts['regular'] = default_font
+
+ pswriter = _api.deprecated("3.4")(property(lambda self: StringIO()))
+
+ def _get_font(self, font):
+ if font in self.fontmap:
+ basename = self.fontmap[font]
+ else:
+ basename = font
+
+ cached_font = self.fonts.get(basename)
+ if cached_font is None:
+ fname = os.path.join(self.basepath, basename + ".afm")
+ with open(fname, 'rb') as fd:
+ cached_font = AFM(fd)
+ cached_font.fname = fname
+ self.fonts[basename] = cached_font
+ self.fonts[cached_font.get_fontname()] = cached_font
+ return cached_font
+
+ def _get_info(self, fontname, font_class, sym, fontsize, dpi, math=True):
+ """Load the cmfont, metrics and glyph with caching."""
+ key = fontname, sym, fontsize, dpi
+ tup = self.glyphd.get(key)
+
+ if tup is not None:
+ return tup
+
+ # Only characters in the "Letter" class should really be italicized.
+ # This class includes greek letters, so we're ok
+ if (fontname == 'it' and
+ (len(sym) > 1
+ or not unicodedata.category(sym).startswith("L"))):
+ fontname = 'rm'
+
+ found_symbol = False
+
+ if sym in latex_to_standard:
+ fontname, num = latex_to_standard[sym]
+ glyph = chr(num)
+ found_symbol = True
+ elif len(sym) == 1:
+ glyph = sym
+ num = ord(glyph)
+ found_symbol = True
+ else:
+ _log.warning(
+ "No TeX to built-in Postscript mapping for {!r}".format(sym))
+
+ slanted = (fontname == 'it')
+ font = self._get_font(fontname)
+
+ if found_symbol:
+ try:
+ symbol_name = font.get_name_char(glyph)
+ except KeyError:
+ _log.warning(
+ "No glyph in standard Postscript font {!r} for {!r}"
+ .format(font.get_fontname(), sym))
+ found_symbol = False
+
+ if not found_symbol:
+ glyph = '?'
+ num = ord(glyph)
+ symbol_name = font.get_name_char(glyph)
+
+ offset = 0
+
+ scale = 0.001 * fontsize
+
+ xmin, ymin, xmax, ymax = [val * scale
+ for val in font.get_bbox_char(glyph)]
+ metrics = types.SimpleNamespace(
+ advance = font.get_width_char(glyph) * scale,
+ width = font.get_width_char(glyph) * scale,
+ height = font.get_height_char(glyph) * scale,
+ xmin = xmin,
+ xmax = xmax,
+ ymin = ymin+offset,
+ ymax = ymax+offset,
+ # iceberg is the equivalent of TeX's "height"
+ iceberg = ymax + offset,
+ slanted = slanted
+ )
+
+ self.glyphd[key] = types.SimpleNamespace(
+ font = font,
+ fontsize = fontsize,
+ postscript_name = font.get_fontname(),
+ metrics = metrics,
+ symbol_name = symbol_name,
+ num = num,
+ glyph = glyph,
+ offset = offset
+ )
+
+ return self.glyphd[key]
+
+ def get_kern(self, font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi):
+ if font1 == font2 and fontsize1 == fontsize2:
+ info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
+ info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
+ font = info1.font
+ return (font.get_kern_dist(info1.glyph, info2.glyph)
+ * 0.001 * fontsize1)
+ return super().get_kern(font1, fontclass1, sym1, fontsize1,
+ font2, fontclass2, sym2, fontsize2, dpi)
+
+ def get_xheight(self, font, fontsize, dpi):
+ font = self._get_font(font)
+ return font.get_xheight() * 0.001 * fontsize
+
+ def get_underline_thickness(self, font, fontsize, dpi):
+ font = self._get_font(font)
+ return font.get_underline_thickness() * 0.001 * fontsize
+
+
+##############################################################################
+# TeX-LIKE BOX MODEL
+
+# The following is based directly on the document 'woven' from the
+# TeX82 source code. This information is also available in printed
+# form:
+#
+# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
+# TeX: The Program. Addison-Wesley Professional.
+#
+# The most relevant "chapters" are:
+# Data structures for boxes and their friends
+# Shipping pages out (Ship class)
+# Packaging (hpack and vpack)
+# Data structures for math mode
+# Subroutines for math mode
+# Typesetting math formulas
+#
+# Many of the docstrings below refer to a numbered "node" in that
+# book, e.g., node123
+#
+# Note that (as TeX) y increases downward, unlike many other parts of
+# matplotlib.
+
+# How much text shrinks when going to the next-smallest level. GROW_FACTOR
+# must be the inverse of SHRINK_FACTOR.
+SHRINK_FACTOR = 0.7
+GROW_FACTOR = 1 / SHRINK_FACTOR
+# The number of different sizes of chars to use, beyond which they will not
+# get any smaller
+NUM_SIZE_LEVELS = 6
+
+
+class FontConstantsBase:
+ """
+ A set of constants that controls how certain things, such as sub-
+ and superscripts are laid out. These are all metrics that can't
+ be reliably retrieved from the font metrics in the font itself.
+ """
+ # Percentage of x-height of additional horiz. space after sub/superscripts
+ script_space = 0.05
+
+ # Percentage of x-height that sub/superscripts drop below the baseline
+ subdrop = 0.4
+
+ # Percentage of x-height that superscripts are raised from the baseline
+ sup1 = 0.7
+
+ # Percentage of x-height that subscripts drop below the baseline
+ sub1 = 0.3
+
+ # Percentage of x-height that subscripts drop below the baseline when a
+ # superscript is present
+ sub2 = 0.5
+
+ # Percentage of x-height that sub/superscripts are offset relative to the
+ # nucleus edge for non-slanted nuclei
+ delta = 0.025
+
+ # Additional percentage of last character height above 2/3 of the
+ # x-height that superscripts are offset relative to the subscript
+ # for slanted nuclei
+ delta_slanted = 0.2
+
+ # Percentage of x-height that superscripts and subscripts are offset for
+ # integrals
+ delta_integral = 0.1
+
+
+class ComputerModernFontConstants(FontConstantsBase):
+ script_space = 0.075
+ subdrop = 0.2
+ sup1 = 0.45
+ sub1 = 0.2
+ sub2 = 0.3
+ delta = 0.075
+ delta_slanted = 0.3
+ delta_integral = 0.3
+
+
+class STIXFontConstants(FontConstantsBase):
+ script_space = 0.1
+ sup1 = 0.8
+ sub2 = 0.6
+ delta = 0.05
+ delta_slanted = 0.3
+ delta_integral = 0.3
+
+
+class STIXSansFontConstants(FontConstantsBase):
+ script_space = 0.05
+ sup1 = 0.8
+ delta_slanted = 0.6
+ delta_integral = 0.3
+
+
+class DejaVuSerifFontConstants(FontConstantsBase):
+ pass
+
+
+class DejaVuSansFontConstants(FontConstantsBase):
+ pass
+
+
+# Maps font family names to the FontConstantsBase subclass to use
+_font_constant_mapping = {
+ 'DejaVu Sans': DejaVuSansFontConstants,
+ 'DejaVu Sans Mono': DejaVuSansFontConstants,
+ 'DejaVu Serif': DejaVuSerifFontConstants,
+ 'cmb10': ComputerModernFontConstants,
+ 'cmex10': ComputerModernFontConstants,
+ 'cmmi10': ComputerModernFontConstants,
+ 'cmr10': ComputerModernFontConstants,
+ 'cmss10': ComputerModernFontConstants,
+ 'cmsy10': ComputerModernFontConstants,
+ 'cmtt10': ComputerModernFontConstants,
+ 'STIXGeneral': STIXFontConstants,
+ 'STIXNonUnicode': STIXFontConstants,
+ 'STIXSizeFiveSym': STIXFontConstants,
+ 'STIXSizeFourSym': STIXFontConstants,
+ 'STIXSizeThreeSym': STIXFontConstants,
+ 'STIXSizeTwoSym': STIXFontConstants,
+ 'STIXSizeOneSym': STIXFontConstants,
+ # Map the fonts we used to ship, just for good measure
+ 'Bitstream Vera Sans': DejaVuSansFontConstants,
+ 'Bitstream Vera': DejaVuSansFontConstants,
+ }
+
+
+def _get_font_constant_set(state):
+ constants = _font_constant_mapping.get(
+ state.font_output._get_font(state.font).family_name,
+ FontConstantsBase)
+ # STIX sans isn't really its own fonts, just different code points
+ # in the STIX fonts, so we have to detect this one separately.
+ if (constants is STIXFontConstants and
+ isinstance(state.font_output, StixSansFonts)):
+ return STIXSansFontConstants
+ return constants
+
+
+class Node:
+ """A node in the TeX box model."""
+
+ def __init__(self):
+ self.size = 0
+
+ def __repr__(self):
+ return self.__class__.__name__
+
+ def get_kerning(self, next):
+ return 0.0
+
+ def shrink(self):
+ """
+ Shrinks one level smaller. There are only three levels of
+ sizes, after which things will no longer get smaller.
+ """
+ self.size += 1
+
+ def grow(self):
+ """
+ Grows one level larger. There is no limit to how big
+ something can get.
+ """
+ self.size -= 1
+
+ def render(self, x, y):
+ pass
+
+
+class Box(Node):
+ """A node with a physical location."""
+
+ def __init__(self, width, height, depth):
+ super().__init__()
+ self.width = width
+ self.height = height
+ self.depth = depth
+
+ def shrink(self):
+ super().shrink()
+ if self.size < NUM_SIZE_LEVELS:
+ self.width *= SHRINK_FACTOR
+ self.height *= SHRINK_FACTOR
+ self.depth *= SHRINK_FACTOR
+
+ def grow(self):
+ super().grow()
+ self.width *= GROW_FACTOR
+ self.height *= GROW_FACTOR
+ self.depth *= GROW_FACTOR
+
+ def render(self, x1, y1, x2, y2):
+ pass
+
+
+class Vbox(Box):
+ """A box with only height (zero width)."""
+
+ def __init__(self, height, depth):
+ super().__init__(0., height, depth)
+
+
+class Hbox(Box):
+ """A box with only width (zero height and depth)."""
+
+ def __init__(self, width):
+ super().__init__(width, 0., 0.)
+
+
+class Char(Node):
+ """
+ A single character.
+
+ Unlike TeX, the font information and metrics are stored with each `Char`
+ to make it easier to lookup the font metrics when needed. Note that TeX
+ boxes have a width, height, and depth, unlike Type1 and TrueType which use
+ a full bounding box and an advance in the x-direction. The metrics must
+ be converted to the TeX model, and the advance (if different from width)
+ must be converted into a `Kern` node when the `Char` is added to its parent
+ `Hlist`.
+ """
+
+ def __init__(self, c, state, math=True):
+ super().__init__()
+ self.c = c
+ self.font_output = state.font_output
+ self.font = state.font
+ self.font_class = state.font_class
+ self.fontsize = state.fontsize
+ self.dpi = state.dpi
+ self.math = math
+ # The real width, height and depth will be set during the
+ # pack phase, after we know the real fontsize
+ self._update_metrics()
+
+ def __repr__(self):
+ return '`%s`' % self.c
+
+ def _update_metrics(self):
+ metrics = self._metrics = self.font_output.get_metrics(
+ self.font, self.font_class, self.c, self.fontsize, self.dpi,
+ self.math)
+ if self.c == ' ':
+ self.width = metrics.advance
+ else:
+ self.width = metrics.width
+ self.height = metrics.iceberg
+ self.depth = -(metrics.iceberg - metrics.height)
+
+ def is_slanted(self):
+ return self._metrics.slanted
+
+ def get_kerning(self, next):
+ """
+ Return the amount of kerning between this and the given character.
+
+ This method is called when characters are strung together into `Hlist`
+ to create `Kern` nodes.
+ """
+ advance = self._metrics.advance - self.width
+ kern = 0.
+ if isinstance(next, Char):
+ kern = self.font_output.get_kern(
+ self.font, self.font_class, self.c, self.fontsize,
+ next.font, next.font_class, next.c, next.fontsize,
+ self.dpi)
+ return advance + kern
+
+ def render(self, x, y):
+ """
+ Render the character to the canvas
+ """
+ self.font_output.render_glyph(
+ x, y,
+ self.font, self.font_class, self.c, self.fontsize, self.dpi)
+
+ def shrink(self):
+ super().shrink()
+ if self.size < NUM_SIZE_LEVELS:
+ self.fontsize *= SHRINK_FACTOR
+ self.width *= SHRINK_FACTOR
+ self.height *= SHRINK_FACTOR
+ self.depth *= SHRINK_FACTOR
+
+ def grow(self):
+ super().grow()
+ self.fontsize *= GROW_FACTOR
+ self.width *= GROW_FACTOR
+ self.height *= GROW_FACTOR
+ self.depth *= GROW_FACTOR
+
+
+class Accent(Char):
+ """
+ The font metrics need to be dealt with differently for accents,
+ since they are already offset correctly from the baseline in
+ TrueType fonts.
+ """
+ def _update_metrics(self):
+ metrics = self._metrics = self.font_output.get_metrics(
+ self.font, self.font_class, self.c, self.fontsize, self.dpi)
+ self.width = metrics.xmax - metrics.xmin
+ self.height = metrics.ymax - metrics.ymin
+ self.depth = 0
+
+ def shrink(self):
+ super().shrink()
+ self._update_metrics()
+
+ def grow(self):
+ super().grow()
+ self._update_metrics()
+
+ def render(self, x, y):
+ """
+ Render the character to the canvas.
+ """
+ self.font_output.render_glyph(
+ x - self._metrics.xmin, y + self._metrics.ymin,
+ self.font, self.font_class, self.c, self.fontsize, self.dpi)
+
+
+class List(Box):
+ """A list of nodes (either horizontal or vertical)."""
+
+ def __init__(self, elements):
+ super().__init__(0., 0., 0.)
+ self.shift_amount = 0. # An arbitrary offset
+ self.children = elements # The child nodes of this list
+ # The following parameters are set in the vpack and hpack functions
+ self.glue_set = 0. # The glue setting of this list
+ self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
+ self.glue_order = 0 # The order of infinity (0 - 3) for the glue
+
+ def __repr__(self):
+ return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
+ super().__repr__(),
+ self.width, self.height,
+ self.depth, self.shift_amount,
+ ' '.join([repr(x) for x in self.children]))
+
+ @staticmethod
+ def _determine_order(totals):
+ """
+ Determine the highest order of glue used by the members of this list.
+
+ Helper function used by vpack and hpack.
+ """
+ for i in range(len(totals))[::-1]:
+ if totals[i] != 0:
+ return i
+ return 0
+
+ def _set_glue(self, x, sign, totals, error_type):
+ o = self._determine_order(totals)
+ self.glue_order = o
+ self.glue_sign = sign
+ if totals[o] != 0.:
+ self.glue_set = x / totals[o]
+ else:
+ self.glue_sign = 0
+ self.glue_ratio = 0.
+ if o == 0:
+ if len(self.children):
+ _log.warning("%s %s: %r",
+ error_type, self.__class__.__name__, self)
+
+ def shrink(self):
+ for child in self.children:
+ child.shrink()
+ super().shrink()
+ if self.size < NUM_SIZE_LEVELS:
+ self.shift_amount *= SHRINK_FACTOR
+ self.glue_set *= SHRINK_FACTOR
+
+ def grow(self):
+ for child in self.children:
+ child.grow()
+ super().grow()
+ self.shift_amount *= GROW_FACTOR
+ self.glue_set *= GROW_FACTOR
+
+
+class Hlist(List):
+ """A horizontal list of boxes."""
+
+ def __init__(self, elements, w=0., m='additional', do_kern=True):
+ super().__init__(elements)
+ if do_kern:
+ self.kern()
+ self.hpack()
+
+ def kern(self):
+ """
+ Insert `Kern` nodes between `Char` nodes to set kerning.
+
+ The `Char` nodes themselves determine the amount of kerning they need
+ (in `~Char.get_kerning`), and this function just creates the correct
+ linked list.
+ """
+ new_children = []
+ num_children = len(self.children)
+ if num_children:
+ for i in range(num_children):
+ elem = self.children[i]
+ if i < num_children - 1:
+ next = self.children[i + 1]
+ else:
+ next = None
+
+ new_children.append(elem)
+ kerning_distance = elem.get_kerning(next)
+ if kerning_distance != 0.:
+ kern = Kern(kerning_distance)
+ new_children.append(kern)
+ self.children = new_children
+
+ # This is a failed experiment to fake cross-font kerning.
+# def get_kerning(self, next):
+# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
+# if isinstance(next, Char):
+# print "CASE A"
+# return self.children[-2].get_kerning(next)
+# elif (isinstance(next, Hlist) and len(next.children)
+# and isinstance(next.children[0], Char)):
+# print "CASE B"
+# result = self.children[-2].get_kerning(next.children[0])
+# print result
+# return result
+# return 0.0
+
+ def hpack(self, w=0., m='additional'):
+ r"""
+ Compute the dimensions of the resulting boxes, and adjust the glue if
+ one of those dimensions is pre-specified. The computed sizes normally
+ enclose all of the material inside the new box; but some items may
+ stick out if negative glue is used, if the box is overfull, or if a
+ ``\vbox`` includes other boxes that have been shifted left.
+
+ Parameters
+ ----------
+ w : float, default: 0
+ A width.
+ m : {'exactly', 'additional'}, default: 'additional'
+ Whether to produce a box whose width is 'exactly' *w*; or a box
+ with the natural width of the contents, plus *w* ('additional').
+
+ Notes
+ -----
+ The defaults produce a box with the natural width of the contents.
+ """
+ # I don't know why these get reset in TeX. Shift_amount is pretty
+ # much useless if we do.
+ # self.shift_amount = 0.
+ h = 0.
+ d = 0.
+ x = 0.
+ total_stretch = [0.] * 4
+ total_shrink = [0.] * 4
+ for p in self.children:
+ if isinstance(p, Char):
+ x += p.width
+ h = max(h, p.height)
+ d = max(d, p.depth)
+ elif isinstance(p, Box):
+ x += p.width
+ if not np.isinf(p.height) and not np.isinf(p.depth):
+ s = getattr(p, 'shift_amount', 0.)
+ h = max(h, p.height - s)
+ d = max(d, p.depth + s)
+ elif isinstance(p, Glue):
+ glue_spec = p.glue_spec
+ x += glue_spec.width
+ total_stretch[glue_spec.stretch_order] += glue_spec.stretch
+ total_shrink[glue_spec.shrink_order] += glue_spec.shrink
+ elif isinstance(p, Kern):
+ x += p.width
+ self.height = h
+ self.depth = d
+
+ if m == 'additional':
+ w += x
+ self.width = w
+ x = w - x
+
+ if x == 0.:
+ self.glue_sign = 0
+ self.glue_order = 0
+ self.glue_ratio = 0.
+ return
+ if x > 0.:
+ self._set_glue(x, 1, total_stretch, "Overfull")
+ else:
+ self._set_glue(x, -1, total_shrink, "Underfull")
+
+
+class Vlist(List):
+ """A vertical list of boxes."""
+
+ def __init__(self, elements, h=0., m='additional'):
+ super().__init__(elements)
+ self.vpack()
+
+ def vpack(self, h=0., m='additional', l=np.inf):
+ """
+ Compute the dimensions of the resulting boxes, and adjust the glue
+ if one of those dimensions is pre-specified.
+
+ Parameters
+ ----------
+ h : float, default: 0
+ A height.
+ m : {'exactly', 'additional'}, default: 'additional'
+ Whether to produce a box whose height is 'exactly' *h*; or a box
+ with the natural height of the contents, plus *h* ('additional').
+ l : float, default: np.inf
+ The maximum height.
+
+ Notes
+ -----
+ The defaults produce a box with the natural height of the contents.
+ """
+ # I don't know why these get reset in TeX. Shift_amount is pretty
+ # much useless if we do.
+ # self.shift_amount = 0.
+ w = 0.
+ d = 0.
+ x = 0.
+ total_stretch = [0.] * 4
+ total_shrink = [0.] * 4
+ for p in self.children:
+ if isinstance(p, Box):
+ x += d + p.height
+ d = p.depth
+ if not np.isinf(p.width):
+ s = getattr(p, 'shift_amount', 0.)
+ w = max(w, p.width + s)
+ elif isinstance(p, Glue):
+ x += d
+ d = 0.
+ glue_spec = p.glue_spec
+ x += glue_spec.width
+ total_stretch[glue_spec.stretch_order] += glue_spec.stretch
+ total_shrink[glue_spec.shrink_order] += glue_spec.shrink
+ elif isinstance(p, Kern):
+ x += d + p.width
+ d = 0.
+ elif isinstance(p, Char):
+ raise RuntimeError(
+ "Internal mathtext error: Char node found in Vlist")
+
+ self.width = w
+ if d > l:
+ x += d - l
+ self.depth = l
+ else:
+ self.depth = d
+
+ if m == 'additional':
+ h += x
+ self.height = h
+ x = h - x
+
+ if x == 0:
+ self.glue_sign = 0
+ self.glue_order = 0
+ self.glue_ratio = 0.
+ return
+
+ if x > 0.:
+ self._set_glue(x, 1, total_stretch, "Overfull")
+ else:
+ self._set_glue(x, -1, total_shrink, "Underfull")
+
+
+class Rule(Box):
+ """
+ A solid black rectangle.
+
+ It has *width*, *depth*, and *height* fields just as in an `Hlist`.
+ However, if any of these dimensions is inf, the actual value will be
+ determined by running the rule up to the boundary of the innermost
+ enclosing box. This is called a "running dimension". The width is never
+ running in an `Hlist`; the height and depth are never running in a `Vlist`.
+ """
+
+ def __init__(self, width, height, depth, state):
+ super().__init__(width, height, depth)
+ self.font_output = state.font_output
+
+ def render(self, x, y, w, h):
+ self.font_output.render_rect_filled(x, y, x + w, y + h)
+
+
+class Hrule(Rule):
+ """Convenience class to create a horizontal rule."""
+
+ def __init__(self, state, thickness=None):
+ if thickness is None:
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ height = depth = thickness * 0.5
+ super().__init__(np.inf, height, depth, state)
+
+
+class Vrule(Rule):
+ """Convenience class to create a vertical rule."""
+
+ def __init__(self, state):
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ super().__init__(thickness, np.inf, np.inf, state)
+
+
+_GlueSpec = namedtuple(
+ "_GlueSpec", "width stretch stretch_order shrink shrink_order")
+_GlueSpec._named = {
+ 'fil': _GlueSpec(0., 1., 1, 0., 0),
+ 'fill': _GlueSpec(0., 1., 2, 0., 0),
+ 'filll': _GlueSpec(0., 1., 3, 0., 0),
+ 'neg_fil': _GlueSpec(0., 0., 0, 1., 1),
+ 'neg_fill': _GlueSpec(0., 0., 0, 1., 2),
+ 'neg_filll': _GlueSpec(0., 0., 0, 1., 3),
+ 'empty': _GlueSpec(0., 0., 0, 0., 0),
+ 'ss': _GlueSpec(0., 1., 1, -1., 1),
+}
+
+
+class Glue(Node):
+ """
+ Most of the information in this object is stored in the underlying
+ ``_GlueSpec`` class, which is shared between multiple glue objects.
+ (This is a memory optimization which probably doesn't matter anymore, but
+ it's easier to stick to what TeX does.)
+ """
+
+ def __init__(self, glue_type):
+ super().__init__()
+ if isinstance(glue_type, str):
+ glue_spec = _GlueSpec._named[glue_type]
+ elif isinstance(glue_type, _GlueSpec):
+ glue_spec = glue_type
+ else:
+ raise ValueError("glue_type must be a glue spec name or instance")
+ self.glue_spec = glue_spec
+
+ def shrink(self):
+ super().shrink()
+ if self.size < NUM_SIZE_LEVELS:
+ g = self.glue_spec
+ self.glue_spec = g._replace(width=g.width * SHRINK_FACTOR)
+
+ def grow(self):
+ super().grow()
+ g = self.glue_spec
+ self.glue_spec = g._replace(width=g.width * GROW_FACTOR)
+
+
+class HCentered(Hlist):
+ """
+ A convenience class to create an `Hlist` whose contents are
+ centered within its enclosing box.
+ """
+
+ def __init__(self, elements):
+ super().__init__([Glue('ss'), *elements, Glue('ss')], do_kern=False)
+
+
+class VCentered(Vlist):
+ """
+ A convenience class to create a `Vlist` whose contents are
+ centered within its enclosing box.
+ """
+
+ def __init__(self, elements):
+ super().__init__([Glue('ss'), *elements, Glue('ss')])
+
+
+class Kern(Node):
+ """
+ A `Kern` node has a width field to specify a (normally
+ negative) amount of spacing. This spacing correction appears in
+ horizontal lists between letters like A and V when the font
+ designer said that it looks better to move them closer together or
+ further apart. A kern node can also appear in a vertical list,
+ when its *width* denotes additional spacing in the vertical
+ direction.
+ """
+
+ height = 0
+ depth = 0
+
+ def __init__(self, width):
+ super().__init__()
+ self.width = width
+
+ def __repr__(self):
+ return "k%.02f" % self.width
+
+ def shrink(self):
+ super().shrink()
+ if self.size < NUM_SIZE_LEVELS:
+ self.width *= SHRINK_FACTOR
+
+ def grow(self):
+ super().grow()
+ self.width *= GROW_FACTOR
+
+
+class SubSuperCluster(Hlist):
+ """
+ A hack to get around that fact that this code does a two-pass parse like
+ TeX. This lets us store enough information in the hlist itself, namely the
+ nucleus, sub- and super-script, such that if another script follows that
+ needs to be attached, it can be reconfigured on the fly.
+ """
+
+ def __init__(self):
+ self.nucleus = None
+ self.sub = None
+ self.super = None
+ super().__init__([])
+
+
+class AutoHeightChar(Hlist):
+ """
+ A character as close to the given height and depth as possible.
+
+ When using a font with multiple height versions of some characters (such as
+ the BaKoMa fonts), the correct glyph will be selected, otherwise this will
+ always just return a scaled version of the glyph.
+ """
+
+ def __init__(self, c, height, depth, state, always=False, factor=None):
+ alternatives = state.font_output.get_sized_alternatives_for_symbol(
+ state.font, c)
+
+ xHeight = state.font_output.get_xheight(
+ state.font, state.fontsize, state.dpi)
+
+ state = state.copy()
+ target_total = height + depth
+ for fontname, sym in alternatives:
+ state.font = fontname
+ char = Char(sym, state)
+ # Ensure that size 0 is chosen when the text is regular sized but
+ # with descender glyphs by subtracting 0.2 * xHeight
+ if char.height + char.depth >= target_total - 0.2 * xHeight:
+ break
+
+ shift = 0
+ if state.font != 0:
+ if factor is None:
+ factor = target_total / (char.height + char.depth)
+ state.fontsize *= factor
+ char = Char(sym, state)
+
+ shift = (depth - char.depth)
+
+ super().__init__([char])
+ self.shift_amount = shift
+
+
+class AutoWidthChar(Hlist):
+ """
+ A character as close to the given width as possible.
+
+ When using a font with multiple width versions of some characters (such as
+ the BaKoMa fonts), the correct glyph will be selected, otherwise this will
+ always just return a scaled version of the glyph.
+ """
+
+ def __init__(self, c, width, state, always=False, char_class=Char):
+ alternatives = state.font_output.get_sized_alternatives_for_symbol(
+ state.font, c)
+
+ state = state.copy()
+ for fontname, sym in alternatives:
+ state.font = fontname
+ char = char_class(sym, state)
+ if char.width >= width:
+ break
+
+ factor = width / char.width
+ state.fontsize *= factor
+ char = char_class(sym, state)
+
+ super().__init__([char])
+ self.width = char.width
+
+
+class Ship:
+ """
+ Once the boxes have been set up, this class routes them to output.
+
+ Since boxes can be inside of boxes inside of boxes, the main work of `Ship`
+ is done by two mutually recursive routines, `hlist_out` and `vlist_out`,
+ which traverse the `Hlist` nodes and `Vlist` nodes inside of horizontal
+ and vertical boxes. The global variables used in TeX to store state as it
+ processes have become member variables here.
+ """
+
+ def __call__(self, ox, oy, box):
+ self.max_push = 0 # Deepest nesting of push commands so far
+ self.cur_s = 0
+ self.cur_v = 0.
+ self.cur_h = 0.
+ self.off_h = ox
+ self.off_v = oy + box.height
+ self.hlist_out(box)
+
+ @staticmethod
+ def clamp(value):
+ if value < -1000000000.:
+ return -1000000000.
+ if value > 1000000000.:
+ return 1000000000.
+ return value
+
+ def hlist_out(self, box):
+ cur_g = 0
+ cur_glue = 0.
+ glue_order = box.glue_order
+ glue_sign = box.glue_sign
+ base_line = self.cur_v
+ left_edge = self.cur_h
+ self.cur_s += 1
+ self.max_push = max(self.cur_s, self.max_push)
+ clamp = self.clamp
+
+ for p in box.children:
+ if isinstance(p, Char):
+ p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
+ self.cur_h += p.width
+ elif isinstance(p, Kern):
+ self.cur_h += p.width
+ elif isinstance(p, List):
+ # node623
+ if len(p.children) == 0:
+ self.cur_h += p.width
+ else:
+ edge = self.cur_h
+ self.cur_v = base_line + p.shift_amount
+ if isinstance(p, Hlist):
+ self.hlist_out(p)
+ else:
+ # p.vpack(box.height + box.depth, 'exactly')
+ self.vlist_out(p)
+ self.cur_h = edge + p.width
+ self.cur_v = base_line
+ elif isinstance(p, Box):
+ # node624
+ rule_height = p.height
+ rule_depth = p.depth
+ rule_width = p.width
+ if np.isinf(rule_height):
+ rule_height = box.height
+ if np.isinf(rule_depth):
+ rule_depth = box.depth
+ if rule_height > 0 and rule_width > 0:
+ self.cur_v = base_line + rule_depth
+ p.render(self.cur_h + self.off_h,
+ self.cur_v + self.off_v,
+ rule_width, rule_height)
+ self.cur_v = base_line
+ self.cur_h += rule_width
+ elif isinstance(p, Glue):
+ # node625
+ glue_spec = p.glue_spec
+ rule_width = glue_spec.width - cur_g
+ if glue_sign != 0: # normal
+ if glue_sign == 1: # stretching
+ if glue_spec.stretch_order == glue_order:
+ cur_glue += glue_spec.stretch
+ cur_g = round(clamp(box.glue_set * cur_glue))
+ elif glue_spec.shrink_order == glue_order:
+ cur_glue += glue_spec.shrink
+ cur_g = round(clamp(box.glue_set * cur_glue))
+ rule_width += cur_g
+ self.cur_h += rule_width
+ self.cur_s -= 1
+
+ def vlist_out(self, box):
+ cur_g = 0
+ cur_glue = 0.
+ glue_order = box.glue_order
+ glue_sign = box.glue_sign
+ self.cur_s += 1
+ self.max_push = max(self.max_push, self.cur_s)
+ left_edge = self.cur_h
+ self.cur_v -= box.height
+ top_edge = self.cur_v
+ clamp = self.clamp
+
+ for p in box.children:
+ if isinstance(p, Kern):
+ self.cur_v += p.width
+ elif isinstance(p, List):
+ if len(p.children) == 0:
+ self.cur_v += p.height + p.depth
+ else:
+ self.cur_v += p.height
+ self.cur_h = left_edge + p.shift_amount
+ save_v = self.cur_v
+ p.width = box.width
+ if isinstance(p, Hlist):
+ self.hlist_out(p)
+ else:
+ self.vlist_out(p)
+ self.cur_v = save_v + p.depth
+ self.cur_h = left_edge
+ elif isinstance(p, Box):
+ rule_height = p.height
+ rule_depth = p.depth
+ rule_width = p.width
+ if np.isinf(rule_width):
+ rule_width = box.width
+ rule_height += rule_depth
+ if rule_height > 0 and rule_depth > 0:
+ self.cur_v += rule_height
+ p.render(self.cur_h + self.off_h,
+ self.cur_v + self.off_v,
+ rule_width, rule_height)
+ elif isinstance(p, Glue):
+ glue_spec = p.glue_spec
+ rule_height = glue_spec.width - cur_g
+ if glue_sign != 0: # not normal: stretching or shrinking applies
+ if glue_sign == 1: # stretching
+ if glue_spec.stretch_order == glue_order:
+ cur_glue += glue_spec.stretch
+ cur_g = round(clamp(box.glue_set * cur_glue))
+ elif glue_spec.shrink_order == glue_order: # shrinking
+ cur_glue += glue_spec.shrink
+ cur_g = round(clamp(box.glue_set * cur_glue))
+ rule_height += cur_g
+ self.cur_v += rule_height
+ elif isinstance(p, Char):
+ raise RuntimeError(
+ "Internal mathtext error: Char node found in vlist")
+ self.cur_s -= 1
+
+
+ship = Ship()
+
+
+##############################################################################
+# PARSER
+
+
+def Error(msg):
+ """Helper class to raise parser errors."""
+ def raise_error(s, loc, toks):
+ raise ParseFatalException(s, loc, msg)
+
+ empty = Empty()
+ empty.setParseAction(raise_error)
+ return empty
+
+
+class Parser:
+ """
+ A pyparsing-based parser for strings containing math expressions.
+
+ Raw text may also appear outside of pairs of ``$``.
+
+ The grammar is based directly on that in TeX, though it cuts a few corners.
+ """
+
+ class _MathStyle(enum.Enum):
+ DISPLAYSTYLE = enum.auto()
+ TEXTSTYLE = enum.auto()
+ SCRIPTSTYLE = enum.auto()
+ SCRIPTSCRIPTSTYLE = enum.auto()
+
+ _binary_operators = set('''
+ + * -
+ \\pm \\sqcap \\rhd
+ \\mp \\sqcup \\unlhd
+ \\times \\vee \\unrhd
+ \\div \\wedge \\oplus
+ \\ast \\setminus \\ominus
+ \\star \\wr \\otimes
+ \\circ \\diamond \\oslash
+ \\bullet \\bigtriangleup \\odot
+ \\cdot \\bigtriangledown \\bigcirc
+ \\cap \\triangleleft \\dagger
+ \\cup \\triangleright \\ddagger
+ \\uplus \\lhd \\amalg'''.split())
+
+ _relation_symbols = set('''
+ = < > :
+ \\leq \\geq \\equiv \\models
+ \\prec \\succ \\sim \\perp
+ \\preceq \\succeq \\simeq \\mid
+ \\ll \\gg \\asymp \\parallel
+ \\subset \\supset \\approx \\bowtie
+ \\subseteq \\supseteq \\cong \\Join
+ \\sqsubset \\sqsupset \\neq \\smile
+ \\sqsubseteq \\sqsupseteq \\doteq \\frown
+ \\in \\ni \\propto \\vdash
+ \\dashv \\dots \\dotplus \\doteqdot'''.split())
+
+ _arrow_symbols = set('''
+ \\leftarrow \\longleftarrow \\uparrow
+ \\Leftarrow \\Longleftarrow \\Uparrow
+ \\rightarrow \\longrightarrow \\downarrow
+ \\Rightarrow \\Longrightarrow \\Downarrow
+ \\leftrightarrow \\longleftrightarrow \\updownarrow
+ \\Leftrightarrow \\Longleftrightarrow \\Updownarrow
+ \\mapsto \\longmapsto \\nearrow
+ \\hookleftarrow \\hookrightarrow \\searrow
+ \\leftharpoonup \\rightharpoonup \\swarrow
+ \\leftharpoondown \\rightharpoondown \\nwarrow
+ \\rightleftharpoons \\leadsto'''.split())
+
+ _spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
+
+ _punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
+
+ _overunder_symbols = set(r'''
+ \sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
+ \bigwedge \bigodot \bigotimes \bigoplus \biguplus
+ '''.split())
+
+ _overunder_functions = set(
+ "lim liminf limsup sup max min".split())
+
+ _dropsub_symbols = set(r'''\int \oint'''.split())
+
+ _fontnames = set("rm cal it tt sf bf default bb frak scr regular".split())
+
+ _function_names = set("""
+ arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
+ liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
+ coth inf max tanh""".split())
+
+ _ambi_delim = set("""
+ | \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow
+ \\Downarrow \\Updownarrow . \\vert \\Vert \\\\|""".split())
+
+ _left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split())
+
+ _right_delim = set(r") ] \} > \rfloor \rangle \rceil".split())
+
+ def __init__(self):
+ p = types.SimpleNamespace()
+ # All forward declarations are here
+ p.accent = Forward()
+ p.ambi_delim = Forward()
+ p.apostrophe = Forward()
+ p.auto_delim = Forward()
+ p.binom = Forward()
+ p.bslash = Forward()
+ p.c_over_c = Forward()
+ p.customspace = Forward()
+ p.end_group = Forward()
+ p.float_literal = Forward()
+ p.font = Forward()
+ p.frac = Forward()
+ p.dfrac = Forward()
+ p.function = Forward()
+ p.genfrac = Forward()
+ p.group = Forward()
+ p.int_literal = Forward()
+ p.latexfont = Forward()
+ p.lbracket = Forward()
+ p.left_delim = Forward()
+ p.lbrace = Forward()
+ p.main = Forward()
+ p.math = Forward()
+ p.math_string = Forward()
+ p.non_math = Forward()
+ p.operatorname = Forward()
+ p.overline = Forward()
+ p.overset = Forward()
+ p.placeable = Forward()
+ p.rbrace = Forward()
+ p.rbracket = Forward()
+ p.required_group = Forward()
+ p.right_delim = Forward()
+ p.right_delim_safe = Forward()
+ p.simple = Forward()
+ p.simple_group = Forward()
+ p.single_symbol = Forward()
+ p.accentprefixed = Forward()
+ p.space = Forward()
+ p.sqrt = Forward()
+ p.start_group = Forward()
+ p.subsuper = Forward()
+ p.subsuperop = Forward()
+ p.symbol = Forward()
+ p.symbol_name = Forward()
+ p.token = Forward()
+ p.underset = Forward()
+ p.unknown_symbol = Forward()
+
+ # Set names on everything -- very useful for debugging
+ for key, val in vars(p).items():
+ if not key.startswith('_'):
+ val.setName(key)
+
+ p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
+ p.int_literal <<= Regex("[-+]?[0-9]+")
+
+ p.lbrace <<= Literal('{').suppress()
+ p.rbrace <<= Literal('}').suppress()
+ p.lbracket <<= Literal('[').suppress()
+ p.rbracket <<= Literal(']').suppress()
+ p.bslash <<= Literal('\\')
+
+ p.space <<= oneOf(list(self._space_widths))
+ p.customspace <<= (
+ Suppress(Literal(r'\hspace'))
+ - ((p.lbrace + p.float_literal + p.rbrace)
+ | Error(r"Expected \hspace{n}"))
+ )
+
+ unicode_range = "\U00000080-\U0001ffff"
+ p.single_symbol <<= Regex(
+ r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" %
+ unicode_range)
+ p.accentprefixed <<= Suppress(p.bslash) + oneOf(self._accentprefixed)
+ p.symbol_name <<= (
+ Combine(p.bslash + oneOf(list(tex2uni)))
+ + Suppress(Regex("(?=[^A-Za-z]|$)").leaveWhitespace())
+ )
+ p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace()
+
+ p.apostrophe <<= Regex("'+")
+
+ p.c_over_c <<= (
+ Suppress(p.bslash)
+ + oneOf(list(self._char_over_chars))
+ )
+
+ p.accent <<= Group(
+ Suppress(p.bslash)
+ + oneOf([*self._accent_map, *self._wide_accents])
+ + Suppress(Optional(White()))
+ - p.placeable
+ )
+
+ p.function <<= (
+ Suppress(p.bslash)
+ + oneOf(list(self._function_names))
+ )
+
+ p.start_group <<= Optional(p.latexfont) + p.lbrace
+ p.end_group <<= p.rbrace.copy()
+ p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace)
+ p.required_group <<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace)
+ p.group <<= Group(
+ p.start_group + ZeroOrMore(p.token) + p.end_group
+ )
+
+ p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames))
+ p.latexfont <<= (
+ Suppress(p.bslash)
+ + oneOf(['math' + x for x in self._fontnames])
+ )
+
+ p.frac <<= Group(
+ Suppress(Literal(r"\frac"))
+ - ((p.required_group + p.required_group)
+ | Error(r"Expected \frac{num}{den}"))
+ )
+
+ p.dfrac <<= Group(
+ Suppress(Literal(r"\dfrac"))
+ - ((p.required_group + p.required_group)
+ | Error(r"Expected \dfrac{num}{den}"))
+ )
+
+ p.binom <<= Group(
+ Suppress(Literal(r"\binom"))
+ - ((p.required_group + p.required_group)
+ | Error(r"Expected \binom{num}{den}"))
+ )
+
+ p.ambi_delim <<= oneOf(list(self._ambi_delim))
+ p.left_delim <<= oneOf(list(self._left_delim))
+ p.right_delim <<= oneOf(list(self._right_delim))
+ p.right_delim_safe <<= oneOf([*(self._right_delim - {'}'}), r'\}'])
+
+ p.genfrac <<= Group(
+ Suppress(Literal(r"\genfrac"))
+ - (((p.lbrace
+ + Optional(p.ambi_delim | p.left_delim, default='')
+ + p.rbrace)
+ + (p.lbrace
+ + Optional(p.ambi_delim | p.right_delim_safe, default='')
+ + p.rbrace)
+ + (p.lbrace + p.float_literal + p.rbrace)
+ + p.simple_group + p.required_group + p.required_group)
+ | Error("Expected "
+ r"\genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}"))
+ )
+
+ p.sqrt <<= Group(
+ Suppress(Literal(r"\sqrt"))
+ - ((Group(Optional(
+ p.lbracket + OneOrMore(~p.rbracket + p.token) + p.rbracket))
+ + p.required_group)
+ | Error("Expected \\sqrt{value}"))
+ )
+
+ p.overline <<= Group(
+ Suppress(Literal(r"\overline"))
+ - (p.required_group | Error("Expected \\overline{value}"))
+ )
+
+ p.overset <<= Group(
+ Suppress(Literal(r"\overset"))
+ - ((p.simple_group + p.simple_group)
+ | Error("Expected \\overset{body}{annotation}"))
+ )
+
+ p.underset <<= Group(
+ Suppress(Literal(r"\underset"))
+ - ((p.simple_group + p.simple_group)
+ | Error("Expected \\underset{body}{annotation}"))
+ )
+
+ p.unknown_symbol <<= Combine(p.bslash + Regex("[A-Za-z]*"))
+
+ p.operatorname <<= Group(
+ Suppress(Literal(r"\operatorname"))
+ - ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace)
+ | Error("Expected \\operatorname{value}"))
+ )
+
+ p.placeable <<= (
+ p.accentprefixed # Must be before accent so named symbols that are
+ # prefixed with an accent name work
+ | p.accent # Must be before symbol as all accents are symbols
+ | p.symbol # Must be third to catch all named symbols and single
+ # chars not in a group
+ | p.c_over_c
+ | p.function
+ | p.group
+ | p.frac
+ | p.dfrac
+ | p.binom
+ | p.genfrac
+ | p.overset
+ | p.underset
+ | p.sqrt
+ | p.overline
+ | p.operatorname
+ )
+
+ p.simple <<= (
+ p.space
+ | p.customspace
+ | p.font
+ | p.subsuper
+ )
+
+ p.subsuperop <<= oneOf(["_", "^"])
+
+ p.subsuper <<= Group(
+ (Optional(p.placeable)
+ + OneOrMore(p.subsuperop - p.placeable)
+ + Optional(p.apostrophe))
+ | (p.placeable + Optional(p.apostrophe))
+ | p.apostrophe
+ )
+
+ p.token <<= (
+ p.simple
+ | p.auto_delim
+ | p.unknown_symbol # Must be last
+ )
+
+ p.auto_delim <<= (
+ Suppress(Literal(r"\left"))
+ - ((p.left_delim | p.ambi_delim)
+ | Error("Expected a delimiter"))
+ + Group(ZeroOrMore(p.simple | p.auto_delim))
+ + Suppress(Literal(r"\right"))
+ - ((p.right_delim | p.ambi_delim)
+ | Error("Expected a delimiter"))
+ )
+
+ p.math <<= OneOrMore(p.token)
+
+ p.math_string <<= QuotedString('$', '\\', unquoteResults=False)
+
+ p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace()
+
+ p.main <<= (
+ p.non_math + ZeroOrMore(p.math_string + p.non_math) + StringEnd()
+ )
+
+ # Set actions
+ for key, val in vars(p).items():
+ if not key.startswith('_'):
+ if hasattr(self, key):
+ val.setParseAction(getattr(self, key))
+
+ self._expression = p.main
+ self._math_expression = p.math
+
+ def parse(self, s, fonts_object, fontsize, dpi):
+ """
+ Parse expression *s* using the given *fonts_object* for
+ output, at the given *fontsize* and *dpi*.
+
+ Returns the parse tree of `Node` instances.
+ """
+ self._state_stack = [
+ self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
+ self._em_width_cache = {}
+ try:
+ result = self._expression.parseString(s)
+ except ParseBaseException as err:
+ raise ValueError("\n".join(["",
+ err.line,
+ " " * (err.column - 1) + "^",
+ str(err)])) from err
+ self._state_stack = None
+ self._em_width_cache = {}
+ self._expression.resetCache()
+ return result[0]
+
+ # The state of the parser is maintained in a stack. Upon
+ # entering and leaving a group { } or math/non-math, the stack
+ # is pushed and popped accordingly. The current state always
+ # exists in the top element of the stack.
+ class State:
+ """
+ Stores the state of the parser.
+
+ States are pushed and popped from a stack as necessary, and
+ the "current" state is always at the top of the stack.
+ """
+ def __init__(self, font_output, font, font_class, fontsize, dpi):
+ self.font_output = font_output
+ self._font = font
+ self.font_class = font_class
+ self.fontsize = fontsize
+ self.dpi = dpi
+
+ def copy(self):
+ return Parser.State(
+ self.font_output,
+ self.font,
+ self.font_class,
+ self.fontsize,
+ self.dpi)
+
+ @property
+ def font(self):
+ return self._font
+
+ @font.setter
+ def font(self, name):
+ if name in ('rm', 'it', 'bf'):
+ self.font_class = name
+ self._font = name
+
+ def get_state(self):
+ """Get the current `State` of the parser."""
+ return self._state_stack[-1]
+
+ def pop_state(self):
+ """Pop a `State` off of the stack."""
+ self._state_stack.pop()
+
+ def push_state(self):
+ """Push a new `State` onto the stack, copying the current state."""
+ self._state_stack.append(self.get_state().copy())
+
+ def main(self, s, loc, toks):
+ return [Hlist(toks)]
+
+ def math_string(self, s, loc, toks):
+ return self._math_expression.parseString(toks[0][1:-1])
+
+ def math(self, s, loc, toks):
+ hlist = Hlist(toks)
+ self.pop_state()
+ return [hlist]
+
+ def non_math(self, s, loc, toks):
+ s = toks[0].replace(r'\$', '$')
+ symbols = [Char(c, self.get_state(), math=False) for c in s]
+ hlist = Hlist(symbols)
+ # We're going into math now, so set font to 'it'
+ self.push_state()
+ self.get_state().font = mpl.rcParams['mathtext.default']
+ return [hlist]
+
+ def _make_space(self, percentage):
+ # All spaces are relative to em width
+ state = self.get_state()
+ key = (state.font, state.fontsize, state.dpi)
+ width = self._em_width_cache.get(key)
+ if width is None:
+ metrics = state.font_output.get_metrics(
+ state.font, mpl.rcParams['mathtext.default'], 'm',
+ state.fontsize, state.dpi)
+ width = metrics.advance
+ self._em_width_cache[key] = width
+ return Kern(width * percentage)
+
+ _space_widths = {
+ r'\,': 0.16667, # 3/18 em = 3 mu
+ r'\thinspace': 0.16667, # 3/18 em = 3 mu
+ r'\/': 0.16667, # 3/18 em = 3 mu
+ r'\>': 0.22222, # 4/18 em = 4 mu
+ r'\:': 0.22222, # 4/18 em = 4 mu
+ r'\;': 0.27778, # 5/18 em = 5 mu
+ r'\ ': 0.33333, # 6/18 em = 6 mu
+ r'~': 0.33333, # 6/18 em = 6 mu, nonbreakable
+ r'\enspace': 0.5, # 9/18 em = 9 mu
+ r'\quad': 1, # 1 em = 18 mu
+ r'\qquad': 2, # 2 em = 36 mu
+ r'\!': -0.16667, # -3/18 em = -3 mu
+ }
+
+ def space(self, s, loc, toks):
+ tok, = toks
+ num = self._space_widths[tok]
+ box = self._make_space(num)
+ return [box]
+
+ def customspace(self, s, loc, toks):
+ return [self._make_space(float(toks[0]))]
+
+ def symbol(self, s, loc, toks):
+ c, = toks
+ try:
+ char = Char(c, self.get_state())
+ except ValueError as err:
+ raise ParseFatalException(s, loc,
+ "Unknown symbol: %s" % c) from err
+
+ if c in self._spaced_symbols:
+ # iterate until we find previous character, needed for cases
+ # such as ${ -2}$, $-2$, or $ -2$.
+ prev_char = next((c for c in s[:loc][::-1] if c != ' '), '')
+ # Binary operators at start of string should not be spaced
+ if (c in self._binary_operators and
+ (len(s[:loc].split()) == 0 or prev_char == '{' or
+ prev_char in self._left_delim)):
+ return [char]
+ else:
+ return [Hlist([self._make_space(0.2),
+ char,
+ self._make_space(0.2)],
+ do_kern=True)]
+ elif c in self._punctuation_symbols:
+
+ # Do not space commas between brackets
+ if c == ',':
+ prev_char = next((c for c in s[:loc][::-1] if c != ' '), '')
+ next_char = next((c for c in s[loc + 1:] if c != ' '), '')
+ if prev_char == '{' and next_char == '}':
+ return [char]
+
+ # Do not space dots as decimal separators
+ if c == '.' and s[loc - 1].isdigit() and s[loc + 1].isdigit():
+ return [char]
+ else:
+ return [Hlist([char, self._make_space(0.2)], do_kern=True)]
+ return [char]
+
+ accentprefixed = symbol
+
+ def unknown_symbol(self, s, loc, toks):
+ c, = toks
+ raise ParseFatalException(s, loc, "Unknown symbol: %s" % c)
+
+ _char_over_chars = {
+ # The first 2 entries in the tuple are (font, char, sizescale) for
+ # the two symbols under and over. The third element is the space
+ # (in multiples of underline height)
+ r'AA': (('it', 'A', 1.0), (None, '\\circ', 0.5), 0.0),
+ }
+
+ def c_over_c(self, s, loc, toks):
+ sym, = toks
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ under_desc, over_desc, space = \
+ self._char_over_chars.get(sym, (None, None, 0.0))
+ if under_desc is None:
+ raise ParseFatalException("Error parsing symbol")
+
+ over_state = state.copy()
+ if over_desc[0] is not None:
+ over_state.font = over_desc[0]
+ over_state.fontsize *= over_desc[2]
+ over = Accent(over_desc[1], over_state)
+
+ under_state = state.copy()
+ if under_desc[0] is not None:
+ under_state.font = under_desc[0]
+ under_state.fontsize *= under_desc[2]
+ under = Char(under_desc[1], under_state)
+
+ width = max(over.width, under.width)
+
+ over_centered = HCentered([over])
+ over_centered.hpack(width, 'exactly')
+
+ under_centered = HCentered([under])
+ under_centered.hpack(width, 'exactly')
+
+ return Vlist([
+ over_centered,
+ Vbox(0., thickness * space),
+ under_centered
+ ])
+
+ _accent_map = {
+ r'hat': r'\circumflexaccent',
+ r'breve': r'\combiningbreve',
+ r'bar': r'\combiningoverline',
+ r'grave': r'\combininggraveaccent',
+ r'acute': r'\combiningacuteaccent',
+ r'tilde': r'\combiningtilde',
+ r'dot': r'\combiningdotabove',
+ r'ddot': r'\combiningdiaeresis',
+ r'dddot': r'\combiningthreedotsabove',
+ r'ddddot': r'\combiningfourdotsabove',
+ r'vec': r'\combiningrightarrowabove',
+ r'"': r'\combiningdiaeresis',
+ r"`": r'\combininggraveaccent',
+ r"'": r'\combiningacuteaccent',
+ r'~': r'\combiningtilde',
+ r'.': r'\combiningdotabove',
+ r'^': r'\circumflexaccent',
+ r'overrightarrow': r'\rightarrow',
+ r'overleftarrow': r'\leftarrow',
+ r'mathring': r'\circ',
+ }
+
+ _wide_accents = set(r"widehat widetilde widebar".split())
+
+ # make a lambda and call it to get the namespace right
+ _accentprefixed = (lambda am: [
+ p for p in tex2uni
+ if any(p.startswith(a) and a != p for a in am)
+ ])(set(_accent_map))
+
+ def accent(self, s, loc, toks):
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ (accent, sym), = toks
+ if accent in self._wide_accents:
+ accent_box = AutoWidthChar(
+ '\\' + accent, sym.width, state, char_class=Accent)
+ else:
+ accent_box = Accent(self._accent_map[accent], state)
+ if accent == 'mathring':
+ accent_box.shrink()
+ accent_box.shrink()
+ centered = HCentered([Hbox(sym.width / 4.0), accent_box])
+ centered.hpack(sym.width, 'exactly')
+ return Vlist([
+ centered,
+ Vbox(0., thickness * 2.0),
+ Hlist([sym])
+ ])
+
+ def function(self, s, loc, toks):
+ hlist = self.operatorname(s, loc, toks)
+ hlist.function_name, = toks
+ return hlist
+
+ def operatorname(self, s, loc, toks):
+ self.push_state()
+ state = self.get_state()
+ state.font = 'rm'
+ hlist_list = []
+ # Change the font of Chars, but leave Kerns alone
+ for c in toks[0]:
+ if isinstance(c, Char):
+ c.font = 'rm'
+ c._update_metrics()
+ hlist_list.append(c)
+ elif isinstance(c, str):
+ hlist_list.append(Char(c, state))
+ else:
+ hlist_list.append(c)
+ next_char_loc = loc + len(toks[0]) + 1
+ if isinstance(toks[0], ParseResults):
+ next_char_loc += len('operatorname{}')
+ next_char = next((c for c in s[next_char_loc:] if c != ' '), '')
+ delimiters = self._left_delim | self._ambi_delim | self._right_delim
+ delimiters |= {'^', '_'}
+ if (next_char not in delimiters and
+ toks[0] not in self._overunder_functions):
+ # Add thin space except when followed by parenthesis, bracket, etc.
+ hlist_list += [self._make_space(self._space_widths[r'\,'])]
+ self.pop_state()
+ return Hlist(hlist_list)
+
+ def start_group(self, s, loc, toks):
+ self.push_state()
+ # Deal with LaTeX-style font tokens
+ if len(toks):
+ self.get_state().font = toks[0][4:]
+ return []
+
+ def group(self, s, loc, toks):
+ grp = Hlist(toks[0])
+ return [grp]
+ required_group = simple_group = group
+
+ def end_group(self, s, loc, toks):
+ self.pop_state()
+ return []
+
+ def font(self, s, loc, toks):
+ name, = toks
+ self.get_state().font = name
+ return []
+
+ def is_overunder(self, nucleus):
+ if isinstance(nucleus, Char):
+ return nucleus.c in self._overunder_symbols
+ elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
+ return nucleus.function_name in self._overunder_functions
+ return False
+
+ def is_dropsub(self, nucleus):
+ if isinstance(nucleus, Char):
+ return nucleus.c in self._dropsub_symbols
+ return False
+
+ def is_slanted(self, nucleus):
+ if isinstance(nucleus, Char):
+ return nucleus.is_slanted()
+ return False
+
+ def is_between_brackets(self, s, loc):
+ return False
+
+ def subsuper(self, s, loc, toks):
+ assert len(toks) == 1
+
+ nucleus = None
+ sub = None
+ super = None
+
+ # Pick all of the apostrophes out, including first apostrophes that
+ # have been parsed as characters
+ napostrophes = 0
+ new_toks = []
+ for tok in toks[0]:
+ if isinstance(tok, str) and tok not in ('^', '_'):
+ napostrophes += len(tok)
+ elif isinstance(tok, Char) and tok.c == "'":
+ napostrophes += 1
+ else:
+ new_toks.append(tok)
+ toks = new_toks
+
+ if len(toks) == 0:
+ assert napostrophes
+ nucleus = Hbox(0.0)
+ elif len(toks) == 1:
+ if not napostrophes:
+ return toks[0] # .asList()
+ else:
+ nucleus = toks[0]
+ elif len(toks) in (2, 3):
+ # single subscript or superscript
+ nucleus = toks[0] if len(toks) == 3 else Hbox(0.0)
+ op, next = toks[-2:]
+ if op == '_':
+ sub = next
+ else:
+ super = next
+ elif len(toks) in (4, 5):
+ # subscript and superscript
+ nucleus = toks[0] if len(toks) == 5 else Hbox(0.0)
+ op1, next1, op2, next2 = toks[-4:]
+ if op1 == op2:
+ if op1 == '_':
+ raise ParseFatalException("Double subscript")
+ else:
+ raise ParseFatalException("Double superscript")
+ if op1 == '_':
+ sub = next1
+ super = next2
+ else:
+ super = next1
+ sub = next2
+ else:
+ raise ParseFatalException(
+ "Subscript/superscript sequence is too long. "
+ "Use braces { } to remove ambiguity.")
+
+ state = self.get_state()
+ rule_thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ xHeight = state.font_output.get_xheight(
+ state.font, state.fontsize, state.dpi)
+
+ if napostrophes:
+ if super is None:
+ super = Hlist([])
+ for i in range(napostrophes):
+ super.children.extend(self.symbol(s, loc, ['\\prime']))
+ # kern() and hpack() needed to get the metrics right after
+ # extending
+ super.kern()
+ super.hpack()
+
+ # Handle over/under symbols, such as sum or prod
+ if self.is_overunder(nucleus):
+ vlist = []
+ shift = 0.
+ width = nucleus.width
+ if super is not None:
+ super.shrink()
+ width = max(width, super.width)
+ if sub is not None:
+ sub.shrink()
+ width = max(width, sub.width)
+
+ vgap = rule_thickness * 3.0
+ if super is not None:
+ hlist = HCentered([super])
+ hlist.hpack(width, 'exactly')
+ vlist.extend([hlist, Vbox(0, vgap)])
+ hlist = HCentered([nucleus])
+ hlist.hpack(width, 'exactly')
+ vlist.append(hlist)
+ if sub is not None:
+ hlist = HCentered([sub])
+ hlist.hpack(width, 'exactly')
+ vlist.extend([Vbox(0, vgap), hlist])
+ shift = hlist.height + vgap
+ vlist = Vlist(vlist)
+ vlist.shift_amount = shift + nucleus.depth
+ result = Hlist([vlist])
+ return [result]
+
+ # We remove kerning on the last character for consistency (otherwise
+ # it will compute kerning based on non-shrunk characters and may put
+ # them too close together when superscripted)
+ # We change the width of the last character to match the advance to
+ # consider some fonts with weird metrics: e.g. stix's f has a width of
+ # 7.75 and a kerning of -4.0 for an advance of 3.72, and we want to put
+ # the superscript at the advance
+ last_char = nucleus
+ if isinstance(nucleus, Hlist):
+ new_children = nucleus.children
+ if len(new_children):
+ # remove last kern
+ if (isinstance(new_children[-1], Kern) and
+ hasattr(new_children[-2], '_metrics')):
+ new_children = new_children[:-1]
+ last_char = new_children[-1]
+ if hasattr(last_char, '_metrics'):
+ last_char.width = last_char._metrics.advance
+ # create new Hlist without kerning
+ nucleus = Hlist(new_children, do_kern=False)
+ else:
+ if isinstance(nucleus, Char):
+ last_char.width = last_char._metrics.advance
+ nucleus = Hlist([nucleus])
+
+ # Handle regular sub/superscripts
+ constants = _get_font_constant_set(state)
+ lc_height = last_char.height
+ lc_baseline = 0
+ if self.is_dropsub(last_char):
+ lc_baseline = last_char.depth
+
+ # Compute kerning for sub and super
+ superkern = constants.delta * xHeight
+ subkern = constants.delta * xHeight
+ if self.is_slanted(last_char):
+ superkern += constants.delta * xHeight
+ superkern += (constants.delta_slanted *
+ (lc_height - xHeight * 2. / 3.))
+ if self.is_dropsub(last_char):
+ subkern = (3 * constants.delta -
+ constants.delta_integral) * lc_height
+ superkern = (3 * constants.delta +
+ constants.delta_integral) * lc_height
+ else:
+ subkern = 0
+
+ if super is None:
+ # node757
+ x = Hlist([Kern(subkern), sub])
+ x.shrink()
+ if self.is_dropsub(last_char):
+ shift_down = lc_baseline + constants.subdrop * xHeight
+ else:
+ shift_down = constants.sub1 * xHeight
+ x.shift_amount = shift_down
+ else:
+ x = Hlist([Kern(superkern), super])
+ x.shrink()
+ if self.is_dropsub(last_char):
+ shift_up = lc_height - constants.subdrop * xHeight
+ else:
+ shift_up = constants.sup1 * xHeight
+ if sub is None:
+ x.shift_amount = -shift_up
+ else: # Both sub and superscript
+ y = Hlist([Kern(subkern), sub])
+ y.shrink()
+ if self.is_dropsub(last_char):
+ shift_down = lc_baseline + constants.subdrop * xHeight
+ else:
+ shift_down = constants.sub2 * xHeight
+ # If sub and superscript collide, move super up
+ clr = (2.0 * rule_thickness -
+ ((shift_up - x.depth) - (y.height - shift_down)))
+ if clr > 0.:
+ shift_up += clr
+ x = Vlist([
+ x,
+ Kern((shift_up - x.depth) - (y.height - shift_down)),
+ y])
+ x.shift_amount = shift_down
+
+ if not self.is_dropsub(last_char):
+ x.width += constants.script_space * xHeight
+ result = Hlist([nucleus, x])
+
+ return [result]
+
+ def _genfrac(self, ldelim, rdelim, rule, style, num, den):
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ rule = float(rule)
+
+ if style is not self._MathStyle.DISPLAYSTYLE:
+ num.shrink()
+ den.shrink()
+ cnum = HCentered([num])
+ cden = HCentered([den])
+ width = max(num.width, den.width)
+ cnum.hpack(width, 'exactly')
+ cden.hpack(width, 'exactly')
+ vlist = Vlist([cnum, # numerator
+ Vbox(0, thickness * 2.0), # space
+ Hrule(state, rule), # rule
+ Vbox(0, thickness * 2.0), # space
+ cden # denominator
+ ])
+
+ # Shift so the fraction line sits in the middle of the
+ # equals sign
+ metrics = state.font_output.get_metrics(
+ state.font, mpl.rcParams['mathtext.default'],
+ '=', state.fontsize, state.dpi)
+ shift = (cden.height -
+ ((metrics.ymax + metrics.ymin) / 2 -
+ thickness * 3.0))
+ vlist.shift_amount = shift
+
+ result = [Hlist([vlist, Hbox(thickness * 2.)])]
+ if ldelim or rdelim:
+ if ldelim == '':
+ ldelim = '.'
+ if rdelim == '':
+ rdelim = '.'
+ return self._auto_sized_delimiter(ldelim, result, rdelim)
+ return result
+
+ def genfrac(self, s, loc, toks):
+ args, = toks
+ return self._genfrac(*args)
+
+ def frac(self, s, loc, toks):
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ (num, den), = toks
+ return self._genfrac('', '', thickness, self._MathStyle.TEXTSTYLE,
+ num, den)
+
+ def dfrac(self, s, loc, toks):
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+ (num, den), = toks
+ return self._genfrac('', '', thickness, self._MathStyle.DISPLAYSTYLE,
+ num, den)
+
+ def binom(self, s, loc, toks):
+ (num, den), = toks
+ return self._genfrac('(', ')', 0.0, self._MathStyle.TEXTSTYLE,
+ num, den)
+
+ def _genset(self, s, loc, toks):
+ (annotation, body), = toks
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ annotation.shrink()
+ cannotation = HCentered([annotation])
+ cbody = HCentered([body])
+ width = max(cannotation.width, cbody.width)
+ cannotation.hpack(width, 'exactly')
+ cbody.hpack(width, 'exactly')
+
+ vgap = thickness * 3
+ if s[loc + 1] == "u": # \underset
+ vlist = Vlist([cbody, # body
+ Vbox(0, vgap), # space
+ cannotation # annotation
+ ])
+ # Shift so the body sits in the same vertical position
+ vlist.shift_amount = cbody.depth + cannotation.height + vgap
+ else: # \overset
+ vlist = Vlist([cannotation, # annotation
+ Vbox(0, vgap), # space
+ cbody # body
+ ])
+
+ # To add horizontal gap between symbols: wrap the Vlist into
+ # an Hlist and extend it with an Hbox(0, horizontal_gap)
+ return vlist
+
+ overset = underset = _genset
+
+ def sqrt(self, s, loc, toks):
+ (root, body), = toks
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ # Determine the height of the body, and add a little extra to
+ # the height so it doesn't seem cramped
+ height = body.height - body.shift_amount + thickness * 5.0
+ depth = body.depth + body.shift_amount
+ check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
+ height = check.height - check.shift_amount
+ depth = check.depth + check.shift_amount
+
+ # Put a little extra space to the left and right of the body
+ padded_body = Hlist([Hbox(2 * thickness), body, Hbox(2 * thickness)])
+ rightside = Vlist([Hrule(state), Glue('fill'), padded_body])
+ # Stretch the glue between the hrule and the body
+ rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
+ 'exactly', depth)
+
+ # Add the root and shift it upward so it is above the tick.
+ # The value of 0.6 is a hard-coded hack ;)
+ if not root:
+ root = Box(check.width * 0.5, 0., 0.)
+ else:
+ root = Hlist(root)
+ root.shrink()
+ root.shrink()
+
+ root_vlist = Vlist([Hlist([root])])
+ root_vlist.shift_amount = -height * 0.6
+
+ hlist = Hlist([root_vlist, # Root
+ # Negative kerning to put root over tick
+ Kern(-check.width * 0.5),
+ check, # Check
+ rightside]) # Body
+ return [hlist]
+
+ def overline(self, s, loc, toks):
+ (body,), = toks
+
+ state = self.get_state()
+ thickness = state.font_output.get_underline_thickness(
+ state.font, state.fontsize, state.dpi)
+
+ height = body.height - body.shift_amount + thickness * 3.0
+ depth = body.depth + body.shift_amount
+
+ # Place overline above body
+ rightside = Vlist([Hrule(state), Glue('fill'), Hlist([body])])
+
+ # Stretch the glue between the hrule and the body
+ rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
+ 'exactly', depth)
+
+ hlist = Hlist([rightside])
+ return [hlist]
+
+ def _auto_sized_delimiter(self, front, middle, back):
+ state = self.get_state()
+ if len(middle):
+ height = max(x.height for x in middle)
+ depth = max(x.depth for x in middle)
+ factor = None
+ else:
+ height = 0
+ depth = 0
+ factor = 1.0
+ parts = []
+ # \left. and \right. aren't supposed to produce any symbols
+ if front != '.':
+ parts.append(
+ AutoHeightChar(front, height, depth, state, factor=factor))
+ parts.extend(middle)
+ if back != '.':
+ parts.append(
+ AutoHeightChar(back, height, depth, state, factor=factor))
+ hlist = Hlist(parts)
+ return hlist
+
+ def auto_delim(self, s, loc, toks):
+ front, middle, back = toks
+
+ return self._auto_sized_delimiter(front, middle.asList(), back)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backend_tools.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backend_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..04ba8f665f1fc1618d3fe21e69df4ed590c87786
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/backend_tools.py
@@ -0,0 +1,1003 @@
+"""
+Abstract base classes define the primitives for Tools.
+These tools are used by `matplotlib.backend_managers.ToolManager`
+
+:class:`ToolBase`
+ Simple stateless tool
+
+:class:`ToolToggleBase`
+ Tool that has two states, only one Toggle tool can be
+ active at any given time for the same
+ `matplotlib.backend_managers.ToolManager`
+"""
+
+import enum
+import re
+import time
+from types import SimpleNamespace
+import uuid
+from weakref import WeakKeyDictionary
+
+import numpy as np
+
+import matplotlib as mpl
+from matplotlib._pylab_helpers import Gcf
+from matplotlib import _api, cbook
+
+
class Cursors(enum.IntEnum):  # Must subclass int for the macOS backend.
    """Backend-independent cursor types."""
    POINTER = enum.auto()
    HAND = enum.auto()
    SELECT_REGION = enum.auto()
    MOVE = enum.auto()
    WAIT = enum.auto()
    RESIZE_HORIZONTAL = enum.auto()
    RESIZE_VERTICAL = enum.auto()
cursors = Cursors  # Backcompat.

# Views positions tool
# Name under which the ToolViewsPositions helper tool is registered; other
# tools look it up via ``toolmanager.get_tool(_views_positions)``.
_views_positions = 'viewpos'
+
+
class ToolBase:
    """
    Base tool class.

    A base tool, only implements `trigger` method or no method at all.
    The tool is instantiated by `matplotlib.backend_managers.ToolManager`.
    """

    default_keymap = None
    """
    Keymap to associate with this tool.

    ``list[str]``: List of keys that will trigger this tool when a keypress
    event is emitted on ``self.figure.canvas``.
    """

    description = None
    """
    Description of the Tool.

    `str`: Tooltip used if the Tool is included in a Toolbar.
    """

    image = None
    """
    Filename of the image.

    `str`: Filename of the image to use in a Toolbar. If None, the *name* is
    used as a label in the toolbar button.
    """

    def __init__(self, toolmanager, name):
        self._name = name
        self._toolmanager = toolmanager
        # The figure is attached later by the ToolManager through the
        # ``figure`` setter / ``set_figure``.
        self._figure = None

    # Read-only views of the construction-time state.
    name = property(
        lambda self: self._name,
        doc="The tool id (str, must be unique among tools of a tool manager).")
    toolmanager = property(
        lambda self: self._toolmanager,
        doc="The `.ToolManager` that controls this tool.")
    canvas = property(
        lambda self: self._figure.canvas if self._figure is not None else None,
        doc="The canvas of the figure affected by this tool, or None.")

    @property
    def figure(self):
        """The Figure affected by this tool, or None."""
        return self._figure

    @figure.setter
    def figure(self, figure):
        self._figure = figure

    # Callable alias for the property setter; subclasses override this to
    # hook figure (re)attachment.
    set_figure = figure.fset

    def _make_classic_style_pseudo_toolbar(self):
        """
        Return a placeholder object with a single `canvas` attribute.

        This is useful to reuse the implementations of tools already provided
        by the classic Toolbars.
        """
        return SimpleNamespace(canvas=self.canvas)

    def trigger(self, sender, event, data=None):
        """
        Called when this tool gets used.

        This method is called by `.ToolManager.trigger_tool`.

        Parameters
        ----------
        event : `.Event`
            The canvas event that caused this tool to be called.
        sender : object
            Object that requested the tool to be triggered.
        data : object
            Extra data.
        """
        pass

    def destroy(self):
        """
        Destroy the tool.

        This method is called by `.ToolManager.remove_tool`.
        """
        pass
+
+
class ToolToggleBase(ToolBase):
    """
    Toggleable tool.

    Every time it is triggered, it switches between enable and disable.

    Parameters
    ----------
    ``*args``
        Variable length argument to be used by the Tool.
    ``**kwargs``
        `toggled` if present and True, sets the initial state of the Tool
        Arbitrary keyword arguments to be consumed by the Tool
    """

    radio_group = None
    """
    Attribute to group 'radio' like tools (mutually exclusive).

    `str` that identifies the group or **None** if not belonging to a group.
    """

    cursor = None
    """Cursor to use when the tool is active."""

    default_toggled = False
    """Default of toggled state."""

    def __init__(self, *args, **kwargs):
        # The initial state may be overridden by a ``toggled`` keyword.
        self._toggled = kwargs.pop('toggled', self.default_toggled)
        super().__init__(*args, **kwargs)

    def trigger(self, sender, event, data=None):
        """Calls `enable` or `disable` based on `toggled` value."""
        if self._toggled:
            self.disable(event)
        else:
            self.enable(event)
        # Flip only after enable/disable ran without raising.
        self._toggled = not self._toggled

    def enable(self, event=None):
        """
        Enable the toggle tool.

        `trigger` calls this method when `toggled` is False.
        """
        pass

    def disable(self, event=None):
        """
        Disable the toggle tool.

        `trigger` call this method when `toggled` is True.

        This can happen in different circumstances.

        * Click on the toolbar tool button.
        * Call to `matplotlib.backend_managers.ToolManager.trigger_tool`.
        * Another `ToolToggleBase` derived tool is triggered
          (from the same `.ToolManager`).
        """
        pass

    @property
    def toggled(self):
        """State of the toggled tool."""
        return self._toggled

    def set_figure(self, figure):
        # A toggled tool is disabled on the outgoing figure and re-enabled on
        # the incoming one so backend side effects (locks, cids) stay balanced.
        toggled = self.toggled
        if toggled:
            if self.figure:
                # Disabling via trigger flips _toggled to False.
                self.trigger(self, None)
            else:
                # if no figure the internal state is not changed
                # we change it here so next call to trigger will change it back
                self._toggled = False
        super().set_figure(figure)
        if toggled:
            if figure:
                # Re-enabling via trigger flips _toggled back to True.
                self.trigger(self, None)
            else:
                # if there is no figure, trigger won't change the internal
                # state we change it back
                self._toggled = True
+
+
class SetCursorBase(ToolBase):
    """
    Change to the current cursor while inaxes.

    This tool, keeps track of all `ToolToggleBase` derived tools, and calls
    `set_cursor` when a tool gets triggered.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._id_drag = None          # cid for motion_notify_event
        self._current_tool = None     # toggled tool whose cursor applies
        self._default_cursor = cursors.POINTER
        self._last_cursor = self._default_cursor
        # Watch tools added after this one...
        self.toolmanager.toolmanager_connect('tool_added_event',
                                             self._add_tool_cbk)
        # process current tools
        for tool in self.toolmanager.tools.values():
            self._add_tool(tool)

    def set_figure(self, figure):
        # Move the motion callback from the old canvas to the new one.
        if self._id_drag:
            self.canvas.mpl_disconnect(self._id_drag)
        super().set_figure(figure)
        if figure:
            self._id_drag = self.canvas.mpl_connect(
                'motion_notify_event', self._set_cursor_cbk)

    def _tool_trigger_cbk(self, event):
        # Track which cursor-bearing tool (if any) is currently toggled on.
        if event.tool.toggled:
            self._current_tool = event.tool
        else:
            self._current_tool = None
        self._set_cursor_cbk(event.canvasevent)

    def _add_tool(self, tool):
        """Set the cursor when the tool is triggered."""
        if getattr(tool, 'cursor', None) is not None:
            self.toolmanager.toolmanager_connect('tool_trigger_%s' % tool.name,
                                                 self._tool_trigger_cbk)

    def _add_tool_cbk(self, event):
        """Process every newly added tool."""
        if event.tool is self:
            return
        self._add_tool(event.tool)

    def _set_cursor_cbk(self, event):
        if not event or not self.canvas:
            return
        if (self._current_tool and getattr(event, "inaxes", None)
                and event.inaxes.get_navigate()):
            # Only push a new cursor when it actually changed.
            if self._last_cursor != self._current_tool.cursor:
                self.canvas.set_cursor(self._current_tool.cursor)
                self._last_cursor = self._current_tool.cursor
        elif self._last_cursor != self._default_cursor:
            self.canvas.set_cursor(self._default_cursor)
            self._last_cursor = self._default_cursor

    @_api.deprecated("3.5", alternative="`.FigureCanvasBase.set_cursor`")
    def set_cursor(self, cursor):
        """
        Set the cursor.
        """
        self.canvas.set_cursor(cursor)


# This exists solely for deprecation warnings; remove with
# SetCursorBase.set_cursor.
ToolSetCursor = SetCursorBase
+
+
class ToolCursorPosition(ToolBase):
    """
    Background tool that reports the pointer position.

    On every mouse motion it emits a toolmanager message describing where
    the cursor is.
    """
    def __init__(self, *args, **kwargs):
        self._id_drag = None
        super().__init__(*args, **kwargs)

    def set_figure(self, figure):
        # Detach from the previous canvas, then attach to the new one.
        if self._id_drag:
            self.canvas.mpl_disconnect(self._id_drag)
        super().set_figure(figure)
        if not figure:
            return
        self._id_drag = self.canvas.mpl_connect(
            'motion_notify_event', self.send_message)

    def send_message(self, event):
        """Call `matplotlib.backend_managers.ToolManager.message_event`."""
        if self.toolmanager.messagelock.locked():
            # Another tool (e.g. pan) owns the message area right now.
            return

        from matplotlib.backend_bases import NavigationToolbar2
        message = NavigationToolbar2._mouse_event_to_message(event)
        self.toolmanager.message_event(
            message if message is not None else ' ', self)
+
+
class RubberbandBase(ToolBase):
    """Draw and remove a rubberband."""

    def trigger(self, sender, event, data):
        """Dispatch to `draw_rubberband` or `remove_rubberband` from *data*."""
        if not self.figure.canvas.widgetlock.available(sender):
            # Someone else holds the widget lock; ignore the request.
            return
        if data is None:
            self.remove_rubberband()
        else:
            self.draw_rubberband(*data)

    def draw_rubberband(self, *data):
        """
        Draw rubberband.

        Each backend must provide its own implementation.
        """
        raise NotImplementedError

    def remove_rubberband(self):
        """
        Remove rubberband.

        Backends may override this; the default is a no-op.
        """
+
+
class ToolQuit(ToolBase):
    """Close (destroy) the current figure via its manager."""

    description = 'Quit the figure'
    default_keymap = mpl.rcParams['keymap.quit']

    def trigger(self, sender, event, data=None):
        # Go through the global registry so the manager/GUI window are
        # torn down consistently.
        Gcf.destroy_fig(self.figure)
+
+
class ToolQuitAll(ToolBase):
    """Close (destroy) every open figure via the global registry."""

    description = 'Quit all figures'
    default_keymap = mpl.rcParams['keymap.quit_all']

    def trigger(self, sender, event, data=None):
        Gcf.destroy_all()
+
+
class ToolGrid(ToolBase):
    """Tool to toggle the major grids of the figure."""

    description = 'Toggle major grids'
    default_keymap = mpl.rcParams['keymap.grid']

    def trigger(self, sender, event, data=None):
        # Reuse the classic key-press grid handling: temporarily install a
        # unique sentinel key as :rc:`keymap.grid` and replay the event
        # carrying that key.
        sentinel = str(uuid.uuid4())
        with mpl.rc_context({'keymap.grid': sentinel}), \
                cbook._setattr_cm(event, key=sentinel):
            mpl.backend_bases.key_press_handler(event, self.figure.canvas)
+
+
class ToolMinorGrid(ToolBase):
    """Tool to toggle the major and minor grids of the figure."""

    description = 'Toggle major and minor grids'
    default_keymap = mpl.rcParams['keymap.grid_minor']

    def trigger(self, sender, event, data=None):
        # Same sentinel-key trick as ToolGrid, but for :rc:`keymap.grid_minor`.
        sentinel = str(uuid.uuid4())
        with mpl.rc_context({'keymap.grid_minor': sentinel}), \
                cbook._setattr_cm(event, key=sentinel):
            mpl.backend_bases.key_press_handler(event, self.figure.canvas)
+
+
class ToolFullScreen(ToolToggleBase):
    """Tool to toggle full screen."""

    description = 'Toggle fullscreen mode'
    default_keymap = mpl.rcParams['keymap.fullscreen']

    # Entering and leaving fullscreen go through the same manager call,
    # so enable and disable are identical.
    def enable(self, event):
        self.figure.canvas.manager.full_screen_toggle()

    def disable(self, event):
        self.figure.canvas.manager.full_screen_toggle()
+
+
class AxisScaleBase(ToolToggleBase):
    """Base Tool to toggle between linear and logarithmic."""

    def trigger(self, sender, event, data=None):
        # Only react to events that happened inside an axes.
        if event.inaxes is None:
            return
        super().trigger(sender, event, data)

    def _apply(self, ax, scale):
        # Shared implementation for enable/disable: set scale and redraw.
        self.set_scale(ax, scale)
        self.figure.canvas.draw_idle()

    def enable(self, event):
        self._apply(event.inaxes, 'log')

    def disable(self, event):
        self._apply(event.inaxes, 'linear')
+
+
class ToolYScale(AxisScaleBase):
    """Switch the Y axis of the clicked axes between linear and log scale."""

    description = 'Toggle scale Y axis'
    default_keymap = mpl.rcParams['keymap.yscale']

    def set_scale(self, ax, scale):
        ax.set_yscale(scale)
+
+
class ToolXScale(AxisScaleBase):
    """Switch the X axis of the clicked axes between linear and log scale."""

    description = 'Toggle scale X axis'
    default_keymap = mpl.rcParams['keymap.xscale']

    def set_scale(self, ax, scale):
        ax.set_xscale(scale)
+
+
class ToolViewsPositions(ToolBase):
    """
    Auxiliary Tool to handle changes in views and positions.

    Runs in the background and should get used by all the tools that
    need to access the figure's history of views and positions, e.g.

    * `ToolZoom`
    * `ToolPan`
    * `ToolHome`
    * `ToolBack`
    * `ToolForward`
    """

    def __init__(self, *args, **kwargs):
        # Per-figure history; weak keys let closed figures be collected.
        self.views = WeakKeyDictionary()
        self.positions = WeakKeyDictionary()
        self.home_views = WeakKeyDictionary()
        super().__init__(*args, **kwargs)

    def add_figure(self, figure):
        """Add the current figure to the stack of views and positions."""

        if figure not in self.views:
            self.views[figure] = cbook.Stack()
            self.positions[figure] = cbook.Stack()
            self.home_views[figure] = WeakKeyDictionary()
            # Define Home
            self.push_current(figure)
            # Make sure we add a home view for new axes as they're added
            figure.add_axobserver(lambda fig: self.update_home_views(fig))

    def clear(self, figure):
        """Reset the axes stack."""
        if figure in self.views:
            self.views[figure].clear()
            self.positions[figure].clear()
            self.home_views[figure].clear()
            self.update_home_views()

    def update_view(self):
        """
        Update the view limits and position for each axes from the current
        stack position. If any axes are present in the figure that aren't in
        the current stack position, use the home view limits for those axes and
        don't update *any* positions.
        """

        views = self.views[self.figure]()
        if views is None:
            return
        pos = self.positions[self.figure]()
        if pos is None:
            return
        home_views = self.home_views[self.figure]
        all_axes = self.figure.get_axes()
        for a in all_axes:
            if a in views:
                cur_view = views[a]
            else:
                # Axes created after this view was stored: use its home view.
                cur_view = home_views[a]
            a._set_view(cur_view)

        # Only restore positions when every axes has a stored position.
        if set(all_axes).issubset(pos):
            for a in all_axes:
                # Restore both the original and modified positions
                a._set_position(pos[a][0], 'original')
                a._set_position(pos[a][1], 'active')

        self.figure.canvas.draw_idle()

    def push_current(self, figure=None):
        """
        Push the current view limits and position onto their respective stacks.
        """
        if not figure:
            figure = self.figure
        views = WeakKeyDictionary()
        pos = WeakKeyDictionary()
        for a in figure.get_axes():
            views[a] = a._get_view()
            pos[a] = self._axes_pos(a)
        self.views[figure].push(views)
        self.positions[figure].push(pos)

    def _axes_pos(self, ax):
        """
        Return the original and modified positions for the specified axes.

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            The `.Axes` to get the positions for.

        Returns
        -------
        original_position, modified_position
            A tuple of the original and modified positions.
        """

        return (ax.get_position(True).frozen(),
                ax.get_position().frozen())

    def update_home_views(self, figure=None):
        """
        Make sure that ``self.home_views`` has an entry for all axes present
        in the figure.
        """

        if not figure:
            figure = self.figure
        for a in figure.get_axes():
            if a not in self.home_views[figure]:
                self.home_views[figure][a] = a._get_view()

    def home(self):
        """Recall the first view and position from the stack."""
        self.views[self.figure].home()
        self.positions[self.figure].home()

    def back(self):
        """Back one step in the stack of views and positions."""
        self.views[self.figure].back()
        self.positions[self.figure].back()

    def forward(self):
        """Forward one step in the stack of views and positions."""
        self.views[self.figure].forward()
        self.positions[self.figure].forward()
+
+
class ViewsPositionsBase(ToolBase):
    """Base class for `ToolHome`, `ToolBack` and `ToolForward`."""

    # Name of the ToolViewsPositions method invoked on trigger.
    _on_trigger = None

    def trigger(self, sender, event, data=None):
        # Look the helper tool up once instead of three times.
        viewpos = self.toolmanager.get_tool(_views_positions)
        viewpos.add_figure(self.figure)
        getattr(viewpos, self._on_trigger)()
        viewpos.update_view()
+
+
class ToolHome(ViewsPositionsBase):
    """Restore the original view limits."""

    description = 'Reset original view'
    image = 'home'
    default_keymap = mpl.rcParams['keymap.home']
    _on_trigger = 'home'  # ToolViewsPositions method to call
+
+
class ToolBack(ViewsPositionsBase):
    """Move back up the view limits stack."""

    description = 'Back to previous view'
    image = 'back'
    default_keymap = mpl.rcParams['keymap.back']
    _on_trigger = 'back'  # ToolViewsPositions method to call
+
+
class ToolForward(ViewsPositionsBase):
    """Move forward in the view lim stack."""

    description = 'Forward to next view'
    image = 'forward'
    default_keymap = mpl.rcParams['keymap.forward']
    _on_trigger = 'forward'  # ToolViewsPositions method to call
+
+
class ConfigureSubplotsBase(ToolBase):
    """Base tool for the configuration of subplots (backends subclass this)."""

    description = 'Configure subplots'
    image = 'subplots'
+
+
class SaveFigureBase(ToolBase):
    """Base tool for figure saving (backends subclass this)."""

    description = 'Save the figure'
    image = 'filesave'
    default_keymap = mpl.rcParams['keymap.save']
+
+
class ZoomPanBase(ToolToggleBase):
    """Base class for `ToolZoom` and `ToolPan`."""
    def __init__(self, *args):
        super().__init__(*args)
        self._button_pressed = None  # mouse button active during the gesture
        self._xypress = None         # per-axes press data for the gesture
        self._idPress = None         # cid for button_press_event
        self._idRelease = None       # cid for button_release_event
        self._idScroll = None        # cid for scroll_event
        self.base_scale = 2.         # zoom factor applied per scroll step
        self.scrollthresh = .5  # .5 second scroll threshold
        self.lastscroll = time.time()-self.scrollthresh

    def enable(self, event):
        """Connect press/release events and lock the canvas."""
        self.figure.canvas.widgetlock(self)
        self._idPress = self.figure.canvas.mpl_connect(
            'button_press_event', self._press)
        self._idRelease = self.figure.canvas.mpl_connect(
            'button_release_event', self._release)
        self._idScroll = self.figure.canvas.mpl_connect(
            'scroll_event', self.scroll_zoom)

    def disable(self, event):
        """Release the canvas and disconnect press/release events."""
        self._cancel_action()
        self.figure.canvas.widgetlock.release(self)
        self.figure.canvas.mpl_disconnect(self._idPress)
        self.figure.canvas.mpl_disconnect(self._idRelease)
        self.figure.canvas.mpl_disconnect(self._idScroll)

    def trigger(self, sender, event, data=None):
        # Make sure the view history exists, then toggle and propagate the
        # navigate mode to every axes of the figure.
        self.toolmanager.get_tool(_views_positions).add_figure(self.figure)
        super().trigger(sender, event, data)
        new_navigate_mode = self.name.upper() if self.toggled else None
        for ax in self.figure.axes:
            ax.set_navigate_mode(new_navigate_mode)

    def scroll_zoom(self, event):
        # https://gist.github.com/tacaswell/3144287
        if event.inaxes is None:
            return

        if event.button == 'up':
            # deal with zoom in
            scl = self.base_scale
        elif event.button == 'down':
            # deal with zoom out
            scl = 1/self.base_scale
        else:
            # deal with something that should never happen
            scl = 1

        ax = event.inaxes
        ax._set_view_from_bbox([event.x, event.y, scl])

        # If last scroll was done within the timing threshold, delete the
        # previous view
        if (time.time()-self.lastscroll) < self.scrollthresh:
            self.toolmanager.get_tool(_views_positions).back()

        self.figure.canvas.draw_idle()  # force re-draw

        self.lastscroll = time.time()
        self.toolmanager.get_tool(_views_positions).push_current()
+
+
class ToolZoom(ZoomPanBase):
    """A Tool for zooming using a rectangle selector."""

    description = 'Zoom to rectangle'
    image = 'zoom_to_rect'
    default_keymap = mpl.rcParams['keymap.zoom']
    cursor = cursors.SELECT_REGION
    radio_group = 'default'

    def __init__(self, *args):
        super().__init__(*args)
        self._ids_zoom = []  # cids of temporary motion/key callbacks

    def _cancel_action(self):
        # Disconnect the temporary callbacks, clear the rubberband
        # (trigger with no data removes it) and reset the gesture state.
        for zoom_id in self._ids_zoom:
            self.figure.canvas.mpl_disconnect(zoom_id)
        self.toolmanager.trigger_tool('rubberband', self)
        self.figure.canvas.draw_idle()
        self._xypress = None
        self._button_pressed = None
        self._ids_zoom = []
        return

    def _press(self, event):
        """Callback for mouse button presses in zoom-to-rectangle mode."""

        # If we're already in the middle of a zoom, pressing another
        # button works to "cancel"
        if self._ids_zoom:
            self._cancel_action()

        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._cancel_action()
            return

        x, y = event.x, event.y

        # Record press location and current view for every zoomable axes hit.
        self._xypress = []
        for i, a in enumerate(self.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_zoom()):
                self._xypress.append((x, y, a, i, a._get_view()))

        id1 = self.figure.canvas.mpl_connect(
            'motion_notify_event', self._mouse_move)
        id2 = self.figure.canvas.mpl_connect(
            'key_press_event', self._switch_on_zoom_mode)
        id3 = self.figure.canvas.mpl_connect(
            'key_release_event', self._switch_off_zoom_mode)

        self._ids_zoom = id1, id2, id3
        # "x"/"y" keys constrain the zoom to one axis while held.
        self._zoom_mode = event.key

    def _switch_on_zoom_mode(self, event):
        self._zoom_mode = event.key
        self._mouse_move(event)

    def _switch_off_zoom_mode(self, event):
        self._zoom_mode = None
        self._mouse_move(event)

    def _mouse_move(self, event):
        """Callback for mouse moves in zoom-to-rectangle mode."""

        if self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, ind, view = self._xypress[0]
            # Clip the rubberband corners to the axes bounding box.
            (x1, y1), (x2, y2) = np.clip(
                [[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
            if self._zoom_mode == "x":
                y1, y2 = a.bbox.intervaly
            elif self._zoom_mode == "y":
                x1, x2 = a.bbox.intervalx
            self.toolmanager.trigger_tool(
                'rubberband', self, data=(x1, y1, x2, y2))

    def _release(self, event):
        """Callback for mouse button releases in zoom-to-rectangle mode."""

        for zoom_id in self._ids_zoom:
            self.figure.canvas.mpl_disconnect(zoom_id)
        self._ids_zoom = []

        if not self._xypress:
            self._cancel_action()
            return

        last_a = []

        for cur_xypress in self._xypress:
            x, y = event.x, event.y
            lastx, lasty, a, _ind, view = cur_xypress
            # ignore singular clicks - 5 pixels is a threshold
            if abs(x - lastx) < 5 or abs(y - lasty) < 5:
                self._cancel_action()
                return

            # detect twinx, twiny axes and avoid double zooming
            twinx, twiny = False, False
            if last_a:
                for la in last_a:
                    if a.get_shared_x_axes().joined(a, la):
                        twinx = True
                    if a.get_shared_y_axes().joined(a, la):
                        twiny = True
            last_a.append(a)

            if self._button_pressed == 1:
                direction = 'in'
            elif self._button_pressed == 3:
                direction = 'out'
            else:
                continue

            a._set_view_from_bbox((lastx, lasty, x, y), direction,
                                  self._zoom_mode, twinx, twiny)

        self._zoom_mode = None
        # Record the new view so back/forward navigation works.
        self.toolmanager.get_tool(_views_positions).push_current()
        self._cancel_action()
+
+
class ToolPan(ZoomPanBase):
    """Pan axes with left mouse, zoom with right."""

    default_keymap = mpl.rcParams['keymap.pan']
    description = 'Pan axes with left mouse, zoom with right'
    image = 'move'
    cursor = cursors.MOVE
    radio_group = 'default'

    def __init__(self, *args):
        super().__init__(*args)
        self._id_drag = None  # cid of the temporary motion_notify callback

    def _cancel_action(self):
        # Reset the gesture state and release the message lock.
        self._button_pressed = None
        self._xypress = []
        self.figure.canvas.mpl_disconnect(self._id_drag)
        self.toolmanager.messagelock.release(self)
        self.figure.canvas.draw_idle()

    def _press(self, event):
        """Callback for mouse button presses: start the pan/zoom gesture."""
        if event.button == 1:
            self._button_pressed = 1
        elif event.button == 3:
            self._button_pressed = 3
        else:
            self._cancel_action()
            return

        x, y = event.x, event.y

        self._xypress = []
        for i, a in enumerate(self.figure.get_axes()):
            if (x is not None and y is not None and a.in_axes(event) and
                    a.get_navigate() and a.can_pan()):
                a.start_pan(x, y, event.button)
                self._xypress.append((a, i))
                # Hold the message lock so the cursor-position tool stays
                # quiet during the drag.
                self.toolmanager.messagelock(self)
                self._id_drag = self.figure.canvas.mpl_connect(
                    'motion_notify_event', self._mouse_move)

    def _release(self, event):
        """Callback for mouse button releases: finish the pan gesture."""
        if self._button_pressed is None:
            self._cancel_action()
            return

        self.figure.canvas.mpl_disconnect(self._id_drag)
        self.toolmanager.messagelock.release(self)

        for a, _ind in self._xypress:
            a.end_pan()
        if not self._xypress:
            self._cancel_action()
            return

        # Record the new view so back/forward navigation works.
        self.toolmanager.get_tool(_views_positions).push_current()
        self._cancel_action()

    def _mouse_move(self, event):
        for a, _ind in self._xypress:
            # safer to use the recorded button at the _press than current
            # button: # multiple button can get pressed during motion...
            a.drag_pan(self._button_pressed, event.key, event.x, event.y)
        self.toolmanager.canvas.draw_idle()
+
+
class ToolHelpBase(ToolBase):
    """Base tool that renders the tool list, shortcuts and descriptions."""

    description = 'Print tool list, shortcuts and description'
    default_keymap = mpl.rcParams['keymap.help']
    image = 'help'

    @staticmethod
    def format_shortcut(key_sequence):
        """
        Convert a shortcut string from the notation used in rc config to the
        standard notation for displaying shortcuts, e.g. 'ctrl+a' -> 'Ctrl+A'.
        """
        return (key_sequence if len(key_sequence) == 1 else
                re.sub(r"\+[A-Z]", r"+Shift\g<0>", key_sequence).title())

    def _format_tool_keymap(self, name):
        """Return the display-formatted, comma-joined keymaps of tool *name*."""
        keymaps = self.toolmanager.get_tool_keymap(name)
        return ", ".join(self.format_shortcut(keymap) for keymap in keymaps)

    def _get_help_entries(self):
        """Return (name, formatted keymap, description) per documented tool."""
        return [(name, self._format_tool_keymap(name), tool.description)
                for name, tool in sorted(self.toolmanager.tools.items())
                if tool.description]

    def _get_help_text(self):
        """Return the help as plain text, one tool per line."""
        entries = self._get_help_entries()
        entries = ["{}: {}\n\t{}".format(*entry) for entry in entries]
        return "\n".join(entries)

    def _get_help_html(self):
        """Return the help as an HTML table."""
        # The HTML markup here had been stripped by a text conversion,
        # leaving unterminated string literals (a syntax error); restored
        # from the upstream matplotlib implementation.
        fmt = "<tr><td>{}</td><td>{}</td><td>{}</td></tr>"
        rows = [fmt.format(
            "<b>Action</b>", "<b>Shortcuts</b>", "<b>Description</b>")]
        rows += [fmt.format(*row) for row in self._get_help_entries()]
        return ("<style>td {padding: 0px 4px}</style>"
                "<table><thead>" + rows[0] + "</thead>"
                "<tbody>" + "".join(rows[1:]) + "</tbody></table>")
+
+
class ToolCopyToClipboardBase(ToolBase):
    """Tool to copy the figure to the clipboard."""

    description = 'Copy the canvas figure to clipboard'
    default_keymap = mpl.rcParams['keymap.copy']

    def trigger(self, *args, **kwargs):
        # Backends without clipboard support fall through to this stub,
        # which merely reports that the feature is unavailable.
        self.toolmanager.message_event("Copy tool is not available", self)
+
+
# Mapping of tool name -> tool class.  String values name backend-provided
# classes that are resolved lazily when the tool is added to a ToolManager.
default_tools = {'home': ToolHome, 'back': ToolBack, 'forward': ToolForward,
                 'zoom': ToolZoom, 'pan': ToolPan,
                 'subplots': 'ToolConfigureSubplots',
                 'save': 'ToolSaveFigure',
                 'grid': ToolGrid,
                 'grid_minor': ToolMinorGrid,
                 'fullscreen': ToolFullScreen,
                 'quit': ToolQuit,
                 'quit_all': ToolQuitAll,
                 'xscale': ToolXScale,
                 'yscale': ToolYScale,
                 'position': ToolCursorPosition,
                 _views_positions: ToolViewsPositions,
                 'cursor': 'ToolSetCursor',
                 'rubberband': 'ToolRubberband',
                 'help': 'ToolHelp',
                 'copy': 'ToolCopyToClipboard',
                 }
"""Default tools"""

# Grouped layout used when populating a toolbar container:
# [[group name, [tool names...]], ...]
default_toolbar_tools = [['navigation', ['home', 'back', 'forward']],
                         ['zoompan', ['pan', 'zoom', 'subplots']],
                         ['io', ['save', 'help']]]
"""Default tools in the toolbar"""
+
+
def add_tools_to_manager(toolmanager, tools=default_tools):
    """
    Register every tool of *tools* with *toolmanager*.

    Parameters
    ----------
    toolmanager : `.backend_managers.ToolManager`
        Manager to which the tools are added.
    tools : {str: class_like}, optional
        The tools to add in a {name: tool} dict, see `add_tool` for more
        info.
    """
    for tool_name, tool_cls in tools.items():
        toolmanager.add_tool(tool_name, tool_cls)
+
+
def add_tools_to_container(container, tools=default_toolbar_tools):
    """
    Populate *container* with grouped tools.

    Parameters
    ----------
    container : Container
        `backend_bases.ToolContainerBase` object that will get the tools added.
    tools : list, optional
        List in the form ``[[group1, [tool1, tool2 ...]], [group2, [...]]]``
        where the tools ``[tool1, tool2, ...]`` will display in group1.
        See `add_tool` for details.
    """
    for group_name, group_members in tools:
        for idx, tool_name in enumerate(group_members):
            container.add_tool(tool_name, group_name, idx)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/bezier.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/bezier.py
new file mode 100644
index 0000000000000000000000000000000000000000..de9e340d64ab66720316f240395727ce663dff97
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/bezier.py
@@ -0,0 +1,595 @@
+"""
+A module providing some utility functions regarding Bezier path manipulation.
+"""
+
+from functools import lru_cache
+import math
+import warnings
+
+import numpy as np
+
+from matplotlib import _api
+
+
+# same algorithm as 3.8's math.comb
+@np.vectorize
+@lru_cache(maxsize=128)
+def _comb(n, k):
+    # Binomial coefficient "n choose k"; by convention 0 when k > n.
+    if k > n:
+        return 0
+    # Symmetry C(n, k) == C(n, n - k) keeps the product as short as possible.
+    k = min(k, n - k)
+    i = np.arange(1, k + 1)
+    # prod of (n+1-i)/i for i = 1..k equals n! / (k! (n-k)!).
+    return np.prod((n + 1 - i)/i).astype(int)
+
+
+class NonIntersectingPathException(ValueError):
+    """Raised when two paths that were expected to intersect do not."""
+    pass
+
+
+# some functions
+
+
+def get_intersection(cx1, cy1, cos_t1, sin_t1,
+                     cx2, cy2, cos_t2, sin_t2):
+    """
+    Return the intersection between the line through (*cx1*, *cy1*) at angle
+    *t1* and the line through (*cx2*, *cy2*) at angle *t2*.
+
+    The angles are given via their cosine/sine.  Raises ValueError when the
+    lines are (near) parallel, i.e. the 2x2 system below is singular.
+    """
+
+    # line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
+    # line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
+
+    line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
+    line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
+
+    # rhs matrix
+    a, b = sin_t1, -cos_t1
+    c, d = sin_t2, -cos_t2
+
+    # Determinant; near-zero means the two directions are parallel
+    # (equal or opposite), so there is no unique intersection.
+    ad_bc = a * d - b * c
+    if abs(ad_bc) < 1e-12:
+        raise ValueError("Given lines do not intersect. Please verify that "
+                         "the angles are not equal or differ by 180 degrees.")
+
+    # rhs_inverse: explicit 2x2 matrix inverse, divided by the determinant.
+    a_, b_ = d, -b
+    c_, d_ = -c, a
+    a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
+
+    x = a_ * line1_rhs + b_ * line2_rhs
+    y = c_ * line1_rhs + d_ * line2_rhs
+
+    return x, y
+
+
+def get_normal_points(cx, cy, cos_t, sin_t, length):
+    """
+    For a line passing through (*cx*, *cy*) and having an angle *t*, return
+    locations of the two points located along its perpendicular line at the
+    distance of *length*.
+    """
+
+    if length == 0.:
+        return cx, cy, cx, cy
+
+    # Rotate the direction (cos_t, sin_t) by -90 and +90 degrees to get the
+    # two perpendicular directions.
+    cos_t1, sin_t1 = sin_t, -cos_t
+    cos_t2, sin_t2 = -sin_t, cos_t
+
+    x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy
+    x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy
+
+    return x1, y1, x2, y2
+
+
+# BEZIER routines
+
+# subdividing bezier curve
+# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
+
+
+def _de_casteljau1(beta, t):
+    """One de Casteljau step: pairwise lerp of adjacent control points at *t*."""
+    next_beta = beta[:-1] * (1 - t) + beta[1:] * t
+    return next_beta
+
+
+def split_de_casteljau(beta, t):
+    """
+    Split a Bezier segment defined by its control points *beta* into two
+    separate segments divided at *t* and return their control points.
+    """
+    beta = np.asarray(beta)
+    # Repeatedly reduce the control polygon; beta_list collects every
+    # intermediate polygon down to the single point B(t).
+    beta_list = [beta]
+    while True:
+        beta = _de_casteljau1(beta, t)
+        beta_list.append(beta)
+        if len(beta) == 1:
+            break
+    # First points of successive polygons form the left half's control
+    # points; last points (in reverse) form the right half's.
+    left_beta = [beta[0] for beta in beta_list]
+    right_beta = [beta[-1] for beta in reversed(beta_list)]
+
+    return left_beta, right_beta
+
+
+def find_bezier_t_intersecting_with_closedpath(
+        bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerance=0.01):
+    """
+    Find the intersection of the Bezier curve with a closed path.
+
+    The intersection point *t* is approximated by two parameters *t0*, *t1*
+    such that *t0* <= *t* <= *t1*.
+
+    Search starts from *t0* and *t1* and uses a simple bisecting algorithm
+    therefore one of the end points must be inside the path while the other
+    doesn't. The search stops when the distance of the points parametrized by
+    *t0* and *t1* gets smaller than the given *tolerance*.
+
+    Parameters
+    ----------
+    bezier_point_at_t : callable
+        A function returning x, y coordinates of the Bezier at parameter *t*.
+        It must have the signature::
+
+            bezier_point_at_t(t: float) -> tuple[float, float]
+
+    inside_closedpath : callable
+        A function returning True if a given point (x, y) is inside the
+        closed path. It must have the signature::
+
+            inside_closedpath(point: tuple[float, float]) -> bool
+
+    t0, t1 : float
+        Start parameters for the search.
+
+    tolerance : float
+        Maximal allowed distance between the final points.
+
+    Returns
+    -------
+    t0, t1 : float
+        The Bezier path parameters.
+    """
+    start = bezier_point_at_t(t0)
+    end = bezier_point_at_t(t1)
+
+    start_inside = inside_closedpath(start)
+    end_inside = inside_closedpath(end)
+
+    # Bisection needs the endpoints on opposite sides (identical points are
+    # tolerated so a degenerate zero-length span doesn't raise).
+    if start_inside == end_inside and start != end:
+        raise NonIntersectingPathException(
+            "Both points are on the same side of the closed path")
+
+    while True:
+
+        # return if the distance is smaller than the tolerance
+        if np.hypot(start[0] - end[0], start[1] - end[1]) < tolerance:
+            return t0, t1
+
+        # calculate the middle point
+        middle_t = 0.5 * (t0 + t1)
+        middle = bezier_point_at_t(middle_t)
+        middle_inside = inside_closedpath(middle)
+
+        # Keep the half that still straddles the boundary; the invariant
+        # "start and end are on opposite sides" is preserved each iteration.
+        if start_inside ^ middle_inside:
+            t1 = middle_t
+            end = middle
+            end_inside = middle_inside
+        else:
+            t0 = middle_t
+            start = middle
+            start_inside = middle_inside
+
+
+class BezierSegment:
+    """
+    A d-dimensional Bezier segment.
+
+    Parameters
+    ----------
+    control_points : (N, d) array
+        Location of the *N* control points.
+    """
+
+    def __init__(self, control_points):
+        self._cpoints = np.asarray(control_points)
+        # _N: number of control points; _d: dimension of each point.
+        self._N, self._d = self._cpoints.shape
+        self._orders = np.arange(self._N)
+        # Binomial coefficients C(N-1, i) of the Bernstein basis.
+        coeff = [math.factorial(self._N - 1)
+                 // (math.factorial(i) * math.factorial(self._N - 1 - i))
+                 for i in range(self._N)]
+        # Control points pre-scaled by the binomial coefficients, so that
+        # evaluation only needs the (1-t)^i t^j powers.
+        self._px = (self._cpoints.T * coeff).T
+
+    def __call__(self, t):
+        """
+        Evaluate the Bezier curve at point(s) t in [0, 1].
+
+        Parameters
+        ----------
+        t : (k,) array-like
+            Points at which to evaluate the curve.
+
+        Returns
+        -------
+        (k, d) array
+            Value of the curve for each point in *t*.
+        """
+        t = np.asarray(t)
+        # Bernstein basis: (1-t)^(n-i) * t^i, combined with the pre-scaled
+        # control points via matrix multiplication.
+        return (np.power.outer(1 - t, self._orders[::-1])
+                * np.power.outer(t, self._orders)) @ self._px
+
+    def point_at_t(self, t):
+        """
+        Evaluate the curve at a single point, returning a tuple of *d* floats.
+        """
+        return tuple(self(t))
+
+    @property
+    def control_points(self):
+        """The control points of the curve."""
+        return self._cpoints
+
+    @property
+    def dimension(self):
+        """The dimension of the curve."""
+        return self._d
+
+    @property
+    def degree(self):
+        """Degree of the polynomial. One less than the number of control points."""
+        return self._N - 1
+
+    @property
+    def polynomial_coefficients(self):
+        r"""
+        The polynomial coefficients of the Bezier curve.
+
+        .. warning:: Follows opposite convention from `numpy.polyval`.
+
+        Returns
+        -------
+        (n+1, d) array
+            Coefficients after expanding in polynomial basis, where :math:`n`
+            is the degree of the bezier curve and :math:`d` its dimension.
+            These are the numbers (:math:`C_j`) such that the curve can be
+            written :math:`\sum_{j=0}^n C_j t^j`.
+
+        Notes
+        -----
+        The coefficients are calculated as
+
+        .. math::
+
+            {n \choose j} \sum_{i=0}^j (-1)^{i+j} {j \choose i} P_i
+
+        where :math:`P_i` are the control points of the curve.
+        """
+        n = self.degree
+        # matplotlib uses n <= 4. overflow plausible starting around n = 15.
+        if n > 10:
+            warnings.warn("Polynomial coefficients formula unstable for high "
+                          "order Bezier curves!", RuntimeWarning)
+        P = self.control_points
+        j = np.arange(n+1)[:, None]
+        i = np.arange(n+1)[None, :]  # _comb is non-zero for i <= j
+        prefactor = (-1)**(i + j) * _comb(j, i)  # j on axis 0, i on axis 1
+        return _comb(n, j) * prefactor @ P  # j on axis 0, self.dimension on 1
+
+    def axis_aligned_extrema(self):
+        """
+        Return the dimension and location of the curve's interior extrema.
+
+        The extrema are the points along the curve where one of its partial
+        derivatives is zero.
+
+        Returns
+        -------
+        dims : array of int
+            Index :math:`i` of the partial derivative which is zero at each
+            interior extrema.
+        dzeros : array of float
+            Of same size as dims. The :math:`t` such that :math:`d/dx_i B(t) =
+            0`
+        """
+        n = self.degree
+        if n <= 1:
+            # A line (or point) has no interior extrema.
+            return np.array([]), np.array([])
+        Cj = self.polynomial_coefficients
+        # Derivative coefficients: d/dt sum C_j t^j = sum j*C_j t^(j-1).
+        dCj = np.arange(1, n+1)[:, None] * Cj[1:]
+        dims = []
+        roots = []
+        for i, pi in enumerate(dCj.T):
+            # np.roots expects descending powers; our coefficients ascend.
+            r = np.roots(pi[::-1])
+            roots.append(r)
+            dims.append(np.full_like(r, i))
+        roots = np.concatenate(roots)
+        dims = np.concatenate(dims)
+        # Keep only real roots with t in [0, 1].
+        in_range = np.isreal(roots) & (roots >= 0) & (roots <= 1)
+        return dims[in_range], np.real(roots)[in_range]
+
+
+def split_bezier_intersecting_with_closedpath(
+        bezier, inside_closedpath, tolerance=0.01):
+    """
+    Split a Bezier curve into two at the intersection with a closed path.
+
+    Parameters
+    ----------
+    bezier : (N, 2) array-like
+        Control points of the Bezier segment. See `.BezierSegment`.
+    inside_closedpath : callable
+        A function returning True if a given point (x, y) is inside the
+        closed path. See also `.find_bezier_t_intersecting_with_closedpath`.
+    tolerance : float
+        The tolerance for the intersection. See also
+        `.find_bezier_t_intersecting_with_closedpath`.
+
+    Returns
+    -------
+    left, right
+        Lists of control points for the two Bezier segments.
+    """
+
+    bz = BezierSegment(bezier)
+    bezier_point_at_t = bz.point_at_t
+
+    t0, t1 = find_bezier_t_intersecting_with_closedpath(
+        bezier_point_at_t, inside_closedpath, tolerance=tolerance)
+
+    # Split at the midpoint of the bracketing interval [t0, t1].
+    _left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)
+    return _left, _right
+
+
+# matplotlib specific
+
+
+def split_path_inout(path, inside, tolerance=0.01, reorder_inout=False):
+    """
+    Divide a path into two segments at the point where ``inside(x, y)`` becomes
+    False.
+    """
+    from .path import Path
+    path_iter = path.iter_segments()
+
+    # ctl_points is a flat array of coordinates; the last pair is the
+    # segment's end point.
+    ctl_points, command = next(path_iter)
+    begin_inside = inside(ctl_points[-2:])  # true if begin point is inside
+
+    ctl_points_old = ctl_points
+
+    # iold/i track the vertex indices bracketing the crossing segment.
+    iold = 0
+    i = 1
+
+    for ctl_points, command in path_iter:
+        iold = i
+        i += len(ctl_points) // 2
+        if inside(ctl_points[-2:]) != begin_inside:
+            # The crossing happens within this segment: rebuild its full
+            # Bezier control polygon (previous end point + this segment).
+            bezier_path = np.concatenate([ctl_points_old[-2:], ctl_points])
+            break
+        ctl_points_old = ctl_points
+    else:
+        raise ValueError("The path does not intersect with the patch")
+
+    bp = bezier_path.reshape((-1, 2))
+    left, right = split_bezier_intersecting_with_closedpath(
+        bp, inside, tolerance)
+    # Emit path codes matching the split segment's order (2 = line,
+    # 3 = quadratic, 4 = cubic control points).
+    if len(left) == 2:
+        codes_left = [Path.LINETO]
+        codes_right = [Path.MOVETO, Path.LINETO]
+    elif len(left) == 3:
+        codes_left = [Path.CURVE3, Path.CURVE3]
+        codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
+    elif len(left) == 4:
+        codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
+        codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
+    else:
+        raise AssertionError("This should never be reached")
+
+    verts_left = left[1:]
+    verts_right = right[:]
+
+    if path.codes is None:
+        path_in = Path(np.concatenate([path.vertices[:i], verts_left]))
+        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]))
+
+    else:
+        path_in = Path(np.concatenate([path.vertices[:iold], verts_left]),
+                       np.concatenate([path.codes[:iold], codes_left]))
+
+        path_out = Path(np.concatenate([verts_right, path.vertices[i:]]),
+                        np.concatenate([codes_right, path.codes[i:]]))
+
+    # By default the first returned path starts at the path's begin point;
+    # optionally swap so that path_in is always the inside part.
+    if reorder_inout and not begin_inside:
+        path_in, path_out = path_out, path_in
+
+    return path_in, path_out
+
+
+def inside_circle(cx, cy, r):
+    """
+    Return a function that checks whether a point is in a circle with center
+    (*cx*, *cy*) and radius *r*.
+
+    The returned function has the signature::
+
+        f(xy: tuple[float, float]) -> bool
+    """
+    # Pre-square the radius so the test avoids a sqrt per call.
+    r2 = r ** 2
+
+    def _f(xy):
+        x, y = xy
+        # Strict inequality: points exactly on the boundary count as outside.
+        return (x - cx) ** 2 + (y - cy) ** 2 < r2
+    return _f
+
+
+# quadratic Bezier lines
+
+def get_cos_sin(x0, y0, x1, y1):
+    """
+    Return the cosine and sine of the angle of the vector from (x0, y0) to
+    (x1, y1), i.e. the unit direction vector; (0, 0) for coincident points.
+    """
+    dx, dy = x1 - x0, y1 - y0
+    d = (dx * dx + dy * dy) ** .5
+    # Account for divide by zero
+    if d == 0:
+        return 0.0, 0.0
+    return dx / d, dy / d
+
+
+def check_if_parallel(dx1, dy1, dx2, dy2, tolerance=1.e-5):
+    """
+    Check if two lines are parallel.
+
+    Parameters
+    ----------
+    dx1, dy1, dx2, dy2 : float
+        The gradients *dy*/*dx* of the two lines.
+    tolerance : float
+        The angular tolerance in radians up to which the lines are considered
+        parallel.
+
+    Returns
+    -------
+    is_parallel
+        - 1 if two lines are parallel in same direction.
+        - -1 if two lines are parallel in opposite direction.
+        - False otherwise.
+    """
+    # NOTE: arctan2(dx, dy) measures the angle from the y-axis rather than
+    # the usual x-axis convention; since both lines use the same convention,
+    # the angular *difference* below is still valid.
+    theta1 = np.arctan2(dx1, dy1)
+    theta2 = np.arctan2(dx2, dy2)
+    dtheta = abs(theta1 - theta2)
+    if dtheta < tolerance:
+        return 1
+    elif abs(dtheta - np.pi) < tolerance:
+        return -1
+    else:
+        return False
+
+
+def get_parallels(bezier2, width):
+    """
+    Given the quadratic Bezier control points *bezier2*, returns
+    control points of quadratic Bezier lines roughly parallel to given
+    one separated by *width*.
+    """
+
+    # The parallel Bezier lines are constructed by following ways.
+    # c1 and c2 are control points representing the begin and end of the
+    # Bezier line.
+    # cm is the middle point
+
+    c1x, c1y = bezier2[0]
+    cmx, cmy = bezier2[1]
+    c2x, c2y = bezier2[2]
+
+    parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
+                                      cmx - c2x, cmy - c2y)
+
+    if parallel_test == -1:
+        # Anti-parallel tangents (degenerate "folded" curve): fall back to
+        # the straight c1->c2 direction for both ends.
+        _api.warn_external(
+            "Lines do not intersect. A straight line is used instead.")
+        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
+        cos_t2, sin_t2 = cos_t1, sin_t1
+    else:
+        # t1 and t2 is the angle between c1 and cm, cm, c2. They are
+        # also an angle of the tangential line of the path at c1 and c2
+        cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
+        cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
+
+    # find c1_left, c1_right which are located along the lines
+    # through c1 and perpendicular to the tangential lines of the
+    # Bezier path at a distance of width. Same thing for c2_left and
+    # c2_right with respect to c2.
+    c1x_left, c1y_left, c1x_right, c1y_right = (
+        get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
+    )
+    c2x_left, c2y_left, c2x_right, c2y_right = (
+        get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
+    )
+
+    # find cm_left which is the intersecting point of a line through
+    # c1_left with angle t1 and a line through c2_left with angle
+    # t2. Same with cm_right.
+    try:
+        cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1,
+                                              sin_t1, c2x_left, c2y_left,
+                                              cos_t2, sin_t2)
+        cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1,
+                                                sin_t1, c2x_right, c2y_right,
+                                                cos_t2, sin_t2)
+    except ValueError:
+        # Special case straight lines, i.e., angle between two lines is
+        # less than the threshold used by get_intersection (we don't use
+        # check_if_parallel as the threshold is not the same).
+        cmx_left, cmy_left = (
+            0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
+        )
+        cmx_right, cmy_right = (
+            0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
+        )
+
+    # the parallel Bezier lines are created with control points of
+    # [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
+    path_left = [(c1x_left, c1y_left),
+                 (cmx_left, cmy_left),
+                 (c2x_left, c2y_left)]
+    path_right = [(c1x_right, c1y_right),
+                  (cmx_right, cmy_right),
+                  (c2x_right, c2y_right)]
+
+    return path_left, path_right
+
+
+def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
+    """
+    Find control points of the Bezier curve passing through (*c1x*, *c1y*),
+    (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.
+    """
+    # For a quadratic Bezier, B(0.5) = (c1 + 2*cm + c2) / 4; solving
+    # B(0.5) == mm for the middle control point gives cm below.
+    cmx = .5 * (4 * mmx - (c1x + c2x))
+    cmy = .5 * (4 * mmy - (c1y + c2y))
+    return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
+
+
+def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
+    """
+    Being similar to get_parallels, returns control points of two quadratic
+    Bezier lines having a width roughly parallel to given one separated by
+    *width*.
+
+    *w1*, *wm* and *w2* scale *width* at the start, middle and end of the
+    curve, so the stroke can taper (defaults produce a wedge: full width at
+    the start, zero at the end).
+    """
+
+    # c1, cm, c2
+    c1x, c1y = bezier2[0]
+    cmx, cmy = bezier2[1]
+    c3x, c3y = bezier2[2]
+
+    # t1 and t2 is the angle between c1 and cm, cm, c3.
+    # They are also an angle of the tangential line of the path at c1 and c3
+    cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
+    cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
+
+    # find c1_left, c1_right which are located along the lines
+    # through c1 and perpendicular to the tangential lines of the
+    # Bezier path at a distance of width. Same thing for c3_left and
+    # c3_right with respect to c3.
+    c1x_left, c1y_left, c1x_right, c1y_right = (
+        get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
+    )
+    c3x_left, c3y_left, c3x_right, c3y_right = (
+        get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
+    )
+
+    # find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
+    # c12-c23
+    c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
+    c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
+    # c123 is B(0.5), the on-curve midpoint (de Casteljau at t = 0.5).
+    c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
+
+    # tangential angle of c123 (angle between c12 and c23)
+    cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
+
+    c123x_left, c123y_left, c123x_right, c123y_right = (
+        get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
+    )
+
+    # Rebuild each side as the quadratic through its start, midpoint and
+    # end offsets.
+    path_left = find_control_points(c1x_left, c1y_left,
+                                    c123x_left, c123y_left,
+                                    c3x_left, c3y_left)
+    path_right = find_control_points(c1x_right, c1y_right,
+                                     c123x_right, c123y_right,
+                                     c3x_right, c3y_right)
+
+    return path_left, path_right
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/contour.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/contour.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1b5fc3cf728ed04d2532f16464279b3f1171082
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/contour.py
@@ -0,0 +1,1799 @@
+"""
+Classes to support contour plotting and labelling for the Axes class.
+"""
+
+import functools
+from numbers import Integral
+
+import numpy as np
+from numpy import ma
+
+import matplotlib as mpl
+from matplotlib import _api, docstring
+from matplotlib.backend_bases import MouseButton
+import matplotlib.path as mpath
+import matplotlib.ticker as ticker
+import matplotlib.cm as cm
+import matplotlib.colors as mcolors
+import matplotlib.collections as mcoll
+import matplotlib.font_manager as font_manager
+import matplotlib.text as text
+import matplotlib.cbook as cbook
+import matplotlib.patches as mpatches
+import matplotlib.transforms as mtransforms
+
+
+# We can't use a single line collection for contour because a line
+# collection can have only a single line style, and we want to be able to have
+# dashed negative contours, for example, and solid positive contours.
+# We could use a single polygon collection for filled contours, but it
+# seems better to keep line and filled contours similar, with one collection
+# per level.
+
+
+class ClabelText(text.Text):
+    """
+    Unlike the ordinary text, the get_rotation returns an updated
+    angle in the pixel coordinate assuming that the input rotation is
+    an angle in data coordinate (or whatever transform set).
+    """
+
+    def get_rotation(self):
+        # Re-map the stored (data-space) angle through the artist's current
+        # transform at the text's position, so the label follows the contour
+        # even if the axes aspect changes after creation.
+        new_angle, = self.get_transform().transform_angles(
+            [super().get_rotation()], [self.get_position()])
+        return new_angle
+
+
+def _contour_labeler_event_handler(cs, inline, inline_spacing, event):
+    """Dispatch mouse/key events during interactive (manual) contour labelling."""
+    canvas = cs.axes.figure.canvas
+    is_button = event.name == "button_press_event"
+    is_key = event.name == "key_press_event"
+    # Quit (even if not in infinite mode; this is consistent with
+    # MATLAB and sometimes quite useful, but will require the user to
+    # test how many points were actually returned before using data).
+    if (is_button and event.button == MouseButton.MIDDLE
+            or is_key and event.key in ["escape", "enter"]):
+        canvas.stop_event_loop()
+    # Pop last click.
+    elif (is_button and event.button == MouseButton.RIGHT
+          or is_key and event.key in ["backspace", "delete"]):
+        # Unfortunately, if one is doing inline labels, then there is currently
+        # no way to fix the broken contour - once humpty-dumpty is broken, he
+        # can't be put back together.  In inline mode, this does nothing.
+        if not inline:
+            cs.pop_label()
+            canvas.draw()
+    # Add new click.
+    elif (is_button and event.button == MouseButton.LEFT
+          # On macOS/gtk, some keys return None.
+          or is_key and event.key is not None):
+        if event.inaxes == cs.axes:
+            cs.add_label_near(event.x, event.y, transform=False,
+                              inline=inline, inline_spacing=inline_spacing)
+            canvas.draw()
+
+
+class ContourLabeler:
+ """Mixin to provide labelling capability to `.ContourSet`."""
+
+    def clabel(self, levels=None, *,
+               fontsize=None, inline=True, inline_spacing=5, fmt=None,
+               colors=None, use_clabeltext=False, manual=False,
+               rightside_up=True, zorder=None):
+        """
+        Label a contour plot.
+
+        Adds labels to line contours in this `.ContourSet` (which inherits from
+        this mixin class).
+
+        Parameters
+        ----------
+        levels : array-like, optional
+            A list of level values, that should be labeled. The list must be
+            a subset of ``cs.levels``. If not given, all levels are labeled.
+
+        fontsize : str or float, default: :rc:`font.size`
+            Size in points or relative size e.g., 'smaller', 'x-large'.
+            See `.Text.set_size` for accepted string values.
+
+        colors : color or colors or None, default: None
+            The label colors:
+
+            - If *None*, the color of each label matches the color of
+              the corresponding contour.
+
+            - If one string color, e.g., *colors* = 'r' or *colors* =
+              'red', all labels will be plotted in this color.
+
+            - If a tuple of colors (string, float, rgb, etc), different labels
+              will be plotted in different colors in the order specified.
+
+        inline : bool, default: True
+            If ``True`` the underlying contour is removed where the label is
+            placed.
+
+        inline_spacing : float, default: 5
+            Space in pixels to leave on each side of label when placing inline.
+
+            This spacing will be exact for labels at locations where the
+            contour is straight, less so for labels on curved contours.
+
+        fmt : `.Formatter` or str or callable or dict, optional
+            How the levels are formatted:
+
+            - If a `.Formatter`, it is used to format all levels at once, using
+              its `.Formatter.format_ticks` method.
+            - If a str, it is interpreted as a %-style format string.
+            - If a callable, it is called with one level at a time and should
+              return the corresponding label.
+            - If a dict, it should directly map levels to labels.
+
+            The default is to use a standard `.ScalarFormatter`.
+
+        manual : bool or iterable, default: False
+            If ``True``, contour labels will be placed manually using
+            mouse clicks. Click the first button near a contour to
+            add a label, click the second button (or potentially both
+            mouse buttons at once) to finish adding labels. The third
+            button can be used to remove the last label added, but
+            only if labels are not inline. Alternatively, the keyboard
+            can be used to select label locations (enter to end label
+            placement, delete or backspace act like the third mouse button,
+            and any other key will select a label location).
+
+            *manual* can also be an iterable object of (x, y) tuples.
+            Contour labels will be created as if mouse is clicked at each
+            (x, y) position.
+
+        rightside_up : bool, default: True
+            If ``True``, label rotations will always be plus
+            or minus 90 degrees from level.
+
+        use_clabeltext : bool, default: False
+            If ``True``, `.ClabelText` class (instead of `.Text`) is used to
+            create labels. `ClabelText` recalculates rotation angles
+            of texts during the drawing time, therefore this can be used if
+            aspect of the axes changes.
+
+        zorder : float or None, default: ``(2 + contour.get_zorder())``
+            zorder of the contour labels.
+
+        Returns
+        -------
+        labels
+            A list of `.Text` instances for the labels.
+        """
+
+        # clabel basically takes the input arguments and uses them to
+        # add a list of "label specific" attributes to the ContourSet
+        # object.  These attributes are all of the form label* and names
+        # should be fairly self explanatory.
+        #
+        # Once these attributes are set, clabel passes control to the
+        # labels method (case of automatic label placement) or
+        # `BlockingContourLabeler` (case of manual label placement).
+
+        if fmt is None:
+            fmt = ticker.ScalarFormatter(useOffset=False)
+            fmt.create_dummy_axis()
+        self.labelFmt = fmt
+        self._use_clabeltext = use_clabeltext
+        # Detect if manual selection is desired and remove from argument list.
+        self.labelManual = manual
+        self.rightside_up = rightside_up
+        if zorder is None:
+            self._clabel_zorder = 2+self._contour_zorder
+        else:
+            self._clabel_zorder = zorder
+
+        if levels is None:
+            levels = self.levels
+            indices = list(range(len(self.cvalues)))
+        else:
+            # Validate the requested subset against the available levels and
+            # remember each level's index into the full level list.
+            levlabs = list(levels)
+            indices, levels = [], []
+            for i, lev in enumerate(self.levels):
+                if lev in levlabs:
+                    indices.append(i)
+                    levels.append(lev)
+            if len(levels) < len(levlabs):
+                raise ValueError(f"Specified levels {levlabs} don't match "
+                                 f"available levels {self.levels}")
+        self.labelLevelList = levels
+        self.labelIndiceList = indices
+
+        self.labelFontProps = font_manager.FontProperties()
+        self.labelFontProps.set_size(fontsize)
+        font_size_pts = self.labelFontProps.get_size_in_points()
+        self.labelFontSizeList = [font_size_pts] * len(levels)
+
+        if colors is None:
+            self.labelMappable = self
+            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
+        else:
+            # Explicit label colors: build a dedicated mappable so labels are
+            # colored independently of the contour colors.
+            cmap = mcolors.ListedColormap(colors, N=len(self.labelLevelList))
+            self.labelCValueList = list(range(len(self.labelLevelList)))
+            self.labelMappable = cm.ScalarMappable(cmap=cmap,
+                                                   norm=mcolors.NoNorm())
+
+        self.labelXYs = []
+
+        if np.iterable(self.labelManual):
+            # NOTE(review): positional args here — confirm add_label_near's
+            # third and fourth parameters are (inline, inline_spacing).
+            for x, y in self.labelManual:
+                self.add_label_near(x, y, inline, inline_spacing)
+        elif self.labelManual:
+            print('Select label locations manually using first mouse button.')
+            print('End manual selection with second mouse button.')
+            if not inline:
+                print('Remove last label by clicking third mouse button.')
+            mpl._blocking_input.blocking_input_loop(
+                self.axes.figure, ["button_press_event", "key_press_event"],
+                timeout=-1, handler=functools.partial(
+                    _contour_labeler_event_handler,
+                    self, inline, inline_spacing))
+        else:
+            self.labels(inline, inline_spacing)
+
+        self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
+        return self.labelTextsList
+
+    def print_label(self, linecontour, labelwidth):
+        """Return whether a contour is long enough to hold a label."""
+        # Heuristic: enough vertices, or a bounding box clearly wider/taller
+        # than the label itself.
+        return (len(linecontour) > 10 * labelwidth
+                or (np.ptp(linecontour, axis=0) > 1.2 * labelwidth).any())
+
+    def too_close(self, x, y, lw):
+        """Return whether a label is already near this location."""
+        # Compare squared distances against (1.2 * label width)^2 to avoid
+        # a sqrt per existing label.
+        thresh = (1.2 * lw) ** 2
+        return any((x - loc[0]) ** 2 + (y - loc[1]) ** 2 < thresh
+                   for loc in self.labelXYs)
+
+    @_api.deprecated("3.4")
+    def get_label_coords(self, distances, XX, YY, ysize, lw):
+        """
+        Return x, y, and the index of a label location.
+
+        Labels are plotted at a location with the smallest
+        deviation of the contour from a straight line
+        unless there is another label nearby, in which case
+        the next best place on the contour is picked up.
+        If all such candidates are rejected, the beginning
+        of the contour is chosen.
+        """
+        hysize = int(ysize / 2)
+        # Candidate rows ordered from straightest to most curved.
+        adist = np.argsort(distances)
+
+        for ind in adist:
+            x, y = XX[ind][hysize], YY[ind][hysize]
+            if self.too_close(x, y, lw):
+                continue
+            return x, y, ind
+
+        # All candidates rejected: fall back to the straightest one.
+        ind = adist[0]
+        x, y = XX[ind][hysize], YY[ind][hysize]
+        return x, y, ind
+
+    def _get_nth_label_width(self, nth):
+        """Return the width of the *nth* label, in pixels."""
+        fig = self.axes.figure
+        # Measure by constructing a throwaway Text with the label's exact
+        # string and font, then asking the renderer for its extent.
+        return (
+            text.Text(0, 0,
+                      self.get_text(self.labelLevelList[nth], self.labelFmt),
+                      figure=fig,
+                      size=self.labelFontSizeList[nth],
+                      fontproperties=self.labelFontProps)
+            .get_window_extent(mpl.tight_layout.get_renderer(fig)).width)
+
+    @_api.deprecated("3.5")
+    def get_label_width(self, lev, fmt, fsize):
+        """Return the width of the label in points."""
+        if not isinstance(lev, str):
+            lev = self.get_text(lev, fmt)
+        fig = self.axes.figure
+        width = (text.Text(0, 0, lev, figure=fig,
+                           size=fsize, fontproperties=self.labelFontProps)
+                 .get_window_extent(mpl.tight_layout.get_renderer(fig)).width)
+        # Convert from pixels to points (72 points per inch).
+        width *= 72 / fig.dpi
+        return width
+
+    def set_label_props(self, label, text, color):
+        """Set the label properties - color, fontsize, text."""
+        label.set_text(text)
+        label.set_color(color)
+        label.set_fontproperties(self.labelFontProps)
+        # Clip labels to the axes area so they never draw outside the plot.
+        label.set_clip_box(self.axes.bbox)
+
+    def get_text(self, lev, fmt):
+        """Get the text of the label."""
+        if isinstance(lev, str):
+            return lev
+        elif isinstance(fmt, dict):
+            # NOTE(review): a level missing from the dict yields the literal
+            # string '%1.3f' as the label text — confirm this is intended.
+            return fmt.get(lev, '%1.3f')
+        elif callable(getattr(fmt, "format_ticks", None)):
+            # Formatter case: format all levels together so the formatter can
+            # pick a consistent precision, then keep only this level's text.
+            return fmt.format_ticks([*self.labelLevelList, lev])[-1]
+        elif callable(fmt):
+            return fmt(lev)
+        else:
+            return fmt % lev
+
+    def locate_label(self, linecontour, labelwidth):
+        """
+        Find good place to draw a label (relatively flat part of the contour).
+        """
+        ctr_size = len(linecontour)
+        n_blocks = int(np.ceil(ctr_size / labelwidth)) if labelwidth > 1 else 1
+        block_size = ctr_size if n_blocks == 1 else int(labelwidth)
+        # Split contour into blocks of length ``block_size``, filling the last
+        # block by cycling the contour start (per `np.resize` semantics).  (Due
+        # to cycling, the index returned is taken modulo ctr_size.)
+        xx = np.resize(linecontour[:, 0], (n_blocks, block_size))
+        yy = np.resize(linecontour[:, 1], (n_blocks, block_size))
+        yfirst = yy[:, :1]
+        ylast = yy[:, -1:]
+        xfirst = xx[:, :1]
+        xlast = xx[:, -1:]
+        # s is proportional to each point's signed area with the block's
+        # chord; abs(s) / l is the point's perpendicular distance to it.
+        s = (yfirst - yy) * (xlast - xfirst) - (xfirst - xx) * (ylast - yfirst)
+        l = np.hypot(xlast - xfirst, ylast - yfirst)
+        # Ignore warning that divide by zero throws, as this is a valid option
+        with np.errstate(divide='ignore', invalid='ignore'):
+            distances = (abs(s) / l).sum(axis=-1)
+        # Labels are drawn in the middle of the block (``hbsize``) where the
+        # contour is the closest (per ``distances``) to a straight line, but
+        # not `too_close()` to a preexisting label.
+        hbsize = block_size // 2
+        adist = np.argsort(distances)
+        # If all candidates are `too_close()`, go back to the straightest part
+        # (``adist[0]``).
+        for idx in np.append(adist, adist[0]):
+            x, y = xx[idx, hbsize], yy[idx, hbsize]
+            if not self.too_close(x, y, labelwidth):
+                break
+        return x, y, (idx * block_size + hbsize) % ctr_size
+
+    def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
+        """
+        Calculate the appropriate label rotation given the linecontour
+        coordinates in screen units, the index of the label location and the
+        label width.
+
+        If *lc* is not None or empty, also break contours and compute
+        inlining.
+
+        *spacing* is the empty space to leave around the label, in pixels.
+
+        Both tasks are done together to avoid calculating path lengths
+        multiple times, which is relatively costly.
+
+        The method used here involves computing the path length along the
+        contour in pixel coordinates and then looking approximately (label
+        width / 2) away from central point to determine rotation and then to
+        break contour if desired.
+        """
+
+        if lc is None:
+            lc = []
+        # Half the label width
+        hlw = lw / 2.0
+
+        # Check if closed and, if so, rotate contour so label is at edge
+        closed = _is_closed_polygon(slc)
+        if closed:
+            slc = np.concatenate([slc[ind:-1], slc[:ind + 1]])
+            if len(lc):  # Rotate lc also if not empty
+                lc = np.concatenate([lc[ind:-1], lc[:ind + 1]])
+            ind = 0
+
+        # Calculate path lengths: pl[k] is cumulative arc length at vertex k,
+        # shifted so pl == 0 at the label center (index ``ind``).
+        pl = np.zeros(slc.shape[0], dtype=float)
+        dx = np.diff(slc, axis=0)
+        pl[1:] = np.cumsum(np.hypot(dx[:, 0], dx[:, 1]))
+        pl = pl - pl[ind]
+
+        # Use linear interpolation to get points around label
+        xi = np.array([-hlw, hlw])
+        if closed:  # Look at end also for closed contours
+            dp = np.array([pl[-1], 0])
+        else:
+            dp = np.zeros_like(xi)
+
+        # Get angle of vector between the two ends of the label - must be
+        # calculated in pixel space for text rotation to work correctly.
+        (dx,), (dy,) = (np.diff(np.interp(dp + xi, pl, slc_col))
+                        for slc_col in slc.T)
+        rotation = np.rad2deg(np.arctan2(dy, dx))
+
+        if self.rightside_up:
+            # Fix angle so text is never upside-down
+            rotation = (rotation + 90) % 180 - 90
+
+        # Break contour if desired
+        nlc = []
+        if len(lc):
+            # Expand range by spacing
+            xi = dp + xi + np.array([-spacing, spacing])
+
+            # Get (integer) indices near points of interest; use -1 as marker
+            # for out of bounds.
+            I = np.interp(xi, pl, np.arange(len(pl)), left=-1, right=-1)
+            I = [np.floor(I[0]).astype(int), np.ceil(I[1]).astype(int)]
+            # xy1/xy2 are the exact cut points on either side of the label,
+            # interpolated along the data-space contour ``lc``.
+            if I[0] != -1:
+                xy1 = [np.interp(xi[0], pl, lc_col) for lc_col in lc.T]
+            if I[1] != -1:
+                xy2 = [np.interp(xi[1], pl, lc_col) for lc_col in lc.T]
+
+            # Actually break contours
+            if closed:
+                # This will remove contour if shorter than label
+                if all(i != -1 for i in I):
+                    nlc.append(np.row_stack([xy2, lc[I[1]:I[0]+1], xy1]))
+            else:
+                # These will remove pieces of contour if they have length zero
+                if I[0] != -1:
+                    nlc.append(np.row_stack([lc[:I[0]+1], xy1]))
+                if I[1] != -1:
+                    nlc.append(np.row_stack([xy2, lc[I[1]:]]))
+
+            # The current implementation removes contours completely
+            # covered by labels.  Uncomment line below to keep
+            # original contour if this is the preferred behavior.
+            # if not len(nlc): nlc = [ lc ]
+
+        return rotation, nlc
+
+ def _get_label_text(self, x, y, rotation):
+ dx, dy = self.axes.transData.inverted().transform((x, y))
+ t = text.Text(dx, dy, rotation=rotation,
+ horizontalalignment='center',
+ verticalalignment='center', zorder=self._clabel_zorder)
+ return t
+
+ def _get_label_clabeltext(self, x, y, rotation):
+ # x, y, rotation is given in pixel coordinate. Convert them to
+ # the data coordinate and create a label using ClabelText
+ # class. This way, the rotation of the clabel is along the
+ # contour line always.
+ transDataInv = self.axes.transData.inverted()
+ dx, dy = transDataInv.transform((x, y))
+ drotation = transDataInv.transform_angles(np.array([rotation]),
+ np.array([[x, y]]))
+ t = ClabelText(dx, dy, rotation=drotation[0],
+ horizontalalignment='center',
+ verticalalignment='center', zorder=self._clabel_zorder)
+
+ return t
+
+ def _add_label(self, t, x, y, lev, cvalue):
+ color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
+
+ _text = self.get_text(lev, self.labelFmt)
+ self.set_label_props(t, _text, color)
+ self.labelTexts.append(t)
+ self.labelCValues.append(cvalue)
+ self.labelXYs.append((x, y))
+
+ # Add label to plot here - useful for manual mode label selection
+ self.axes.add_artist(t)
+
+ def add_label(self, x, y, rotation, lev, cvalue):
+ """
+ Add contour label using :class:`~matplotlib.text.Text` class.
+ """
+ t = self._get_label_text(x, y, rotation)
+ self._add_label(t, x, y, lev, cvalue)
+
+ def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
+ """
+ Add contour label using :class:`ClabelText` class.
+ """
+ # x, y, rotation is given in pixel coordinate. Convert them to
+ # the data coordinate and create a label using ClabelText
+ # class. This way, the rotation of the clabel is along the
+ # contour line always.
+ t = self._get_label_clabeltext(x, y, rotation)
+ self._add_label(t, x, y, lev, cvalue)
+
    def add_label_near(self, x, y, inline=True, inline_spacing=5,
                       transform=None):
        """
        Add a label near the point ``(x, y)``.

        Parameters
        ----------
        x, y : float
            The approximate location of the label.
        inline : bool, default: True
            If *True* remove the segment of the contour beneath the label.
        inline_spacing : int, default: 5
            Space in pixels to leave on each side of label when placing
            inline. This spacing will be exact for labels at locations where
            the contour is straight, less so for labels on curved contours.
        transform : `.Transform` or `False`, default: ``self.axes.transData``
            A transform applied to ``(x, y)`` before labeling. The default
            causes ``(x, y)`` to be interpreted as data coordinates. `False`
            is a synonym for `.IdentityTransform`; i.e. ``(x, y)`` should be
            interpreted as display coordinates.
        """

        if transform is None:
            transform = self.axes.transData
        # A falsy *transform* (i.e. False) means (x, y) are already in
        # display coordinates.
        if transform:
            x, y = transform.transform((x, y))

        # find the nearest contour _in screen units_
        conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
            x, y, self.labelIndiceList)[:5]

        # calc_label_rot_and_inline() requires that (xmin, ymin)
        # be a vertex in the path. So, if it isn't, add a vertex here
        paths = self.collections[conmin].get_paths()  # paths of correct coll.
        lc = paths[segmin].vertices  # vertices of correct segment
        # Where should the new vertex be added in data-units?
        xcmin = self.axes.transData.inverted().transform([xmin, ymin])
        if not np.allclose(xcmin, lc[imin]):
            # No vertex is close enough, so add a new point in the vertices and
            # replace the path by the new one.
            lc = np.insert(lc, imin, xcmin, axis=0)
            paths[segmin] = mpath.Path(lc)

        # Get index of nearest level in subset of levels used for labeling
        lmin = self.labelIndiceList.index(conmin)

        # Get label width for rotating labels and breaking contours
        lw = self._get_nth_label_width(lmin)

        # Figure out label rotation.
        rotation, nlc = self.calc_label_rot_and_inline(
            self.axes.transData.transform(lc),  # to pixel space.
            imin, lw, lc if inline else None, inline_spacing)

        self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
                       self.labelCValueList[lmin])

        if inline:
            # Remove old, not looping over paths so we can do this up front
            paths.pop(segmin)

            # Add paths if not empty or single point
            for n in nlc:
                if len(n) > 1:
                    paths.append(mpath.Path(n))
+
+ def pop_label(self, index=-1):
+ """Defaults to removing last label, but any index can be supplied"""
+ self.labelCValues.pop(index)
+ t = self.labelTexts.pop(index)
+ t.remove()
+
    def labels(self, inline, inline_spacing):
        """
        Place a label on each contour level selected for labeling; if
        *inline* is true, break the contour lines under each label.
        """
        if self._use_clabeltext:
            add_label = self.add_label_clabeltext
        else:
            add_label = self.add_label

        for idx, (icon, lev, cvalue) in enumerate(zip(
                self.labelIndiceList,
                self.labelLevelList,
                self.labelCValueList,
        )):

            con = self.collections[icon]
            trans = con.get_transform()
            lw = self._get_nth_label_width(idx)
            additions = []
            paths = con.get_paths()
            for segNum, linepath in enumerate(paths):
                lc = linepath.vertices  # Line contour
                slc = trans.transform(lc)  # Line contour in screen coords

                # Check if long enough for a label
                if self.print_label(slc, lw):
                    x, y, ind = self.locate_label(slc, lw)

                    rotation, new = self.calc_label_rot_and_inline(
                        slc, ind, lw, lc if inline else None, inline_spacing)

                    # Actually add the label
                    add_label(x, y, rotation, lev, cvalue)

                    # If inline, add new contours
                    if inline:
                        for n in new:
                            # Add path if not empty or single point
                            if len(n) > 1:
                                additions.append(mpath.Path(n))
                else:  # If not adding label, keep old path
                    additions.append(linepath)

            # After looping over all segments on a contour, replace old paths
            # by new ones if inlining.
            if inline:
                paths[:] = additions
+
+
+def _is_closed_polygon(X):
+ """
+ Return whether first and last object in a sequence are the same. These are
+ presumably coordinates on a polygonal curve, in which case this function
+ tests if that curve is closed.
+ """
+ return np.allclose(X[0], X[-1], rtol=1e-10, atol=1e-13)
+
+
+def _find_closest_point_on_path(xys, p):
+ """
+ Parameters
+ ----------
+ xys : (N, 2) array-like
+ Coordinates of vertices.
+ p : (float, float)
+ Coordinates of point.
+
+ Returns
+ -------
+ d2min : float
+ Minimum square distance of *p* to *xys*.
+ proj : (float, float)
+ Projection of *p* onto *xys*.
+ imin : (int, int)
+ Consecutive indices of vertices of segment in *xys* where *proj* is.
+ Segments are considered as including their end-points; i.e if the
+ closest point on the path is a node in *xys* with index *i*, this
+ returns ``(i-1, i)``. For the special case where *xys* is a single
+ point, this returns ``(0, 0)``.
+ """
+ if len(xys) == 1:
+ return (((p - xys[0]) ** 2).sum(), xys[0], (0, 0))
+ dxys = xys[1:] - xys[:-1] # Individual segment vectors.
+ norms = (dxys ** 2).sum(axis=1)
+ norms[norms == 0] = 1 # For zero-length segment, replace 0/0 by 0/1.
+ rel_projs = np.clip( # Project onto each segment in relative 0-1 coords.
+ ((p - xys[:-1]) * dxys).sum(axis=1) / norms,
+ 0, 1)[:, None]
+ projs = xys[:-1] + rel_projs * dxys # Projs. onto each segment, in (x, y).
+ d2s = ((projs - p) ** 2).sum(axis=1) # Squared distances.
+ imin = np.argmin(d2s)
+ return (d2s[imin], projs[imin], (imin, imin+1))
+
+
+docstring.interpd.update(contour_set_attributes=r"""
+Attributes
+----------
+ax : `~matplotlib.axes.Axes`
+ The Axes object in which the contours are drawn.
+
+collections : `.silent_list` of `.PathCollection`\s
+ The `.Artist`\s representing the contour. This is a list of
+ `.PathCollection`\s for both line and filled contours.
+
+levels : array
+ The values of the contour levels.
+
+layers : array
+ Same as levels for line contours; half-way between
+ levels for filled contours. See ``ContourSet._process_colors``.
+""")
+
+
# ContourSet couples ScalarMappable (colormapping) with ContourLabeler
# (labelling support); subclasses provide _process_args to compute segments.
@docstring.dedent_interpd
class ContourSet(cm.ScalarMappable, ContourLabeler):
    """
    Store a set of contour lines or filled regions.

    User-callable method: `~.Axes.clabel`

    Parameters
    ----------
    ax : `~.axes.Axes`

    levels : [level0, level1, ..., leveln]
        A list of floating point numbers indicating the contour levels.

    allsegs : [level0segs, level1segs, ...]
        List of all the polygon segments for all the *levels*.
        For contour lines ``len(allsegs) == len(levels)``, and for
        filled contour regions ``len(allsegs) = len(levels)-1``. The lists
        should look like ::

            level0segs = [polygon0, polygon1, ...]
            polygon0 = [[x0, y0], [x1, y1], ...]

    allkinds : ``None`` or [level0kinds, level1kinds, ...]
        Optional list of all the polygon vertex kinds (code types), as
        described and used in Path. This is used to allow multiply-
        connected paths such as holes within filled polygons.
        If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
        should look like ::

            level0kinds = [polygon0kinds, ...]
            polygon0kinds = [vertexcode0, vertexcode1, ...]

        If *allkinds* is not ``None``, usually all polygons for a
        particular contour level are grouped together so that
        ``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.

    **kwargs
        Keyword arguments are as described in the docstring of
        `~.Axes.contour`.

    %(contour_set_attributes)s
    """
+
    def __init__(self, ax, *args,
                 levels=None, filled=False, linewidths=None, linestyles=None,
                 hatches=(None,), alpha=None, origin=None, extent=None,
                 cmap=None, colors=None, norm=None, vmin=None, vmax=None,
                 extend='neither', antialiased=None, nchunk=0, locator=None,
                 transform=None,
                 **kwargs):
        """
        Draw contour lines or filled regions, depending on
        whether keyword arg *filled* is ``False`` (default) or ``True``.

        Call signature::

            ContourSet(ax, levels, allsegs, [allkinds], **kwargs)

        Parameters
        ----------
        ax : `~.axes.Axes`
            The `~.axes.Axes` object to draw on.

        levels : [level0, level1, ..., leveln]
            A list of floating point numbers indicating the contour
            levels.

        allsegs : [level0segs, level1segs, ...]
            List of all the polygon segments for all the *levels*.
            For contour lines ``len(allsegs) == len(levels)``, and for
            filled contour regions ``len(allsegs) = len(levels)-1``. The lists
            should look like ::

                level0segs = [polygon0, polygon1, ...]
                polygon0 = [[x0, y0], [x1, y1], ...]

        allkinds : [level0kinds, level1kinds, ...], optional
            Optional list of all the polygon vertex kinds (code types), as
            described and used in Path. This is used to allow multiply-
            connected paths such as holes within filled polygons.
            If not ``None``, ``len(allkinds) == len(allsegs)``. The lists
            should look like ::

                level0kinds = [polygon0kinds, ...]
                polygon0kinds = [vertexcode0, vertexcode1, ...]

            If *allkinds* is not ``None``, usually all polygons for a
            particular contour level are grouped together so that
            ``level0segs = [polygon0]`` and ``level0kinds = [polygon0kinds]``.

        **kwargs
            Keyword arguments are as described in the docstring of
            `~.Axes.contour`.
        """
        self.axes = ax
        self.levels = levels
        self.filled = filled
        self.linewidths = linewidths
        self.linestyles = linestyles
        self.hatches = hatches
        self.alpha = alpha
        self.origin = origin
        self.extent = extent
        self.colors = colors
        self.extend = extend
        self.antialiased = antialiased
        if self.antialiased is None and self.filled:
            # Eliminate artifacts; we are not stroking the boundaries.
            self.antialiased = False
            # The default for line contours will be taken from the
            # LineCollection default, which uses :rc:`lines.antialiased`.

        self.nchunk = nchunk
        self.locator = locator
        # Either an explicit LogNorm or a LogLocator puts the contouring
        # on a log scale.
        if (isinstance(norm, mcolors.LogNorm)
                or isinstance(self.locator, ticker.LogLocator)):
            self.logscale = True
            if norm is None:
                norm = mcolors.LogNorm()
        else:
            self.logscale = False

        _api.check_in_list([None, 'lower', 'upper', 'image'], origin=origin)
        if self.extent is not None and len(self.extent) != 4:
            raise ValueError(
                "If given, 'extent' must be None or (x0, x1, y0, y1)")
        if self.colors is not None and cmap is not None:
            raise ValueError('Either colors or cmap must be None')
        if self.origin == 'image':
            self.origin = mpl.rcParams['image.origin']

        self._transform = transform

        # Subclass hook: computes levels/segments and consumes its kwargs.
        kwargs = self._process_args(*args, **kwargs)
        self._process_levels()

        self._extend_min = self.extend in ['min', 'both']
        self._extend_max = self.extend in ['max', 'both']
        if self.colors is not None:
            ncolors = len(self.levels)
            if self.filled:
                ncolors -= 1
            i0 = 0

            # Handle the case where colors are given for the extended
            # parts of the contour.

            use_set_under_over = False
            # if we are extending the lower end, and we've been given enough
            # colors then skip the first color in the resulting cmap. For the
            # extend_max case we don't need to worry about passing more colors
            # than ncolors as ListedColormap will clip.
            total_levels = (ncolors +
                            int(self._extend_min) +
                            int(self._extend_max))
            if (len(self.colors) == total_levels and
                    (self._extend_min or self._extend_max)):
                use_set_under_over = True
                if self._extend_min:
                    i0 = 1

            cmap = mcolors.ListedColormap(self.colors[i0:None], N=ncolors)

            if use_set_under_over:
                if self._extend_min:
                    cmap.set_under(self.colors[0])
                if self._extend_max:
                    cmap.set_over(self.colors[-1])

        self.collections = cbook.silent_list(None)

        # label lists must be initialized here
        self.labelTexts = []
        self.labelCValues = []

        kw = {'cmap': cmap}
        if norm is not None:
            kw['norm'] = norm
        # sets self.cmap, norm if needed;
        cm.ScalarMappable.__init__(self, **kw)
        if vmin is not None:
            self.norm.vmin = vmin
        if vmax is not None:
            self.norm.vmax = vmax
        self._process_colors()

        if getattr(self, 'allsegs', None) is None:
            self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()
        elif self.allkinds is None:
            # allsegs specified in constructor may or may not have allkinds as
            # well. Must ensure allkinds can be zipped below.
            self.allkinds = [None] * len(self.allsegs)

        if self.filled:
            if self.linewidths is not None:
                _api.warn_external('linewidths is ignored by contourf')
            # Lower and upper contour levels.
            lowers, uppers = self._get_lowers_and_uppers()
            # Default zorder taken from Collection
            self._contour_zorder = kwargs.pop('zorder', 1)

            self.collections[:] = [
                mcoll.PathCollection(
                    self._make_paths(segs, kinds),
                    antialiaseds=(self.antialiased,),
                    edgecolors='none',
                    alpha=self.alpha,
                    transform=self.get_transform(),
                    zorder=self._contour_zorder)
                for level, level_upper, segs, kinds
                in zip(lowers, uppers, self.allsegs, self.allkinds)]
        else:
            self.tlinewidths = tlinewidths = self._process_linewidths()
            tlinestyles = self._process_linestyles()
            aa = self.antialiased
            if aa is not None:
                aa = (self.antialiased,)
            # Default zorder taken from LineCollection, which is higher than
            # for filled contours so that lines are displayed on top.
            self._contour_zorder = kwargs.pop('zorder', 2)

            self.collections[:] = [
                mcoll.PathCollection(
                    self._make_paths(segs, kinds),
                    facecolors="none",
                    antialiaseds=aa,
                    linewidths=width,
                    linestyles=[lstyle],
                    alpha=self.alpha,
                    transform=self.get_transform(),
                    zorder=self._contour_zorder,
                    label='_nolegend_')
                for level, width, lstyle, segs, kinds
                in zip(self.levels, tlinewidths, tlinestyles, self.allsegs,
                       self.allkinds)]

        for col in self.collections:
            self.axes.add_collection(col, autolim=False)
            col.sticky_edges.x[:] = [self._mins[0], self._maxs[0]]
            col.sticky_edges.y[:] = [self._mins[1], self._maxs[1]]
        self.axes.update_datalim([self._mins, self._maxs])
        self.axes.autoscale_view(tight=True)

        self.changed()  # set the colors

        if kwargs:
            _api.warn_external(
                'The following kwargs were not used by contour: ' +
                ", ".join(map(repr, kwargs))
            )
+
+ def get_transform(self):
+ """
+ Return the :class:`~matplotlib.transforms.Transform`
+ instance used by this ContourSet.
+ """
+ if self._transform is None:
+ self._transform = self.axes.transData
+ elif (not isinstance(self._transform, mtransforms.Transform)
+ and hasattr(self._transform, '_as_mpl_transform')):
+ self._transform = self._transform._as_mpl_transform(self.axes)
+ return self._transform
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ # the C object _contour_generator cannot currently be pickled. This
+ # isn't a big issue as it is not actually used once the contour has
+ # been calculated.
+ state['_contour_generator'] = None
+ return state
+
+ def legend_elements(self, variable_name='x', str_format=str):
+ """
+ Return a list of artists and labels suitable for passing through
+ to `~.Axes.legend` which represent this ContourSet.
+
+ The labels have the form "0 < x <= 1" stating the data ranges which
+ the artists represent.
+
+ Parameters
+ ----------
+ variable_name : str
+ The string used inside the inequality used on the labels.
+ str_format : function: float -> str
+ Function used to format the numbers in the labels.
+
+ Returns
+ -------
+ artists : list[`.Artist`]
+ A list of the artists.
+ labels : list[str]
+ A list of the labels.
+ """
+ artists = []
+ labels = []
+
+ if self.filled:
+ lowers, uppers = self._get_lowers_and_uppers()
+ n_levels = len(self.collections)
+
+ for i, (collection, lower, upper) in enumerate(
+ zip(self.collections, lowers, uppers)):
+ patch = mpatches.Rectangle(
+ (0, 0), 1, 1,
+ facecolor=collection.get_facecolor()[0],
+ hatch=collection.get_hatch(),
+ alpha=collection.get_alpha())
+ artists.append(patch)
+
+ lower = str_format(lower)
+ upper = str_format(upper)
+
+ if i == 0 and self.extend in ('min', 'both'):
+ labels.append(fr'${variable_name} \leq {lower}s$')
+ elif i == n_levels - 1 and self.extend in ('max', 'both'):
+ labels.append(fr'${variable_name} > {upper}s$')
+ else:
+ labels.append(fr'${lower} < {variable_name} \leq {upper}$')
+ else:
+ for collection, level in zip(self.collections, self.levels):
+
+ patch = mcoll.LineCollection(None)
+ patch.update_from(collection)
+
+ artists.append(patch)
+ # format the level for insertion into the labels
+ level = str_format(level)
+ labels.append(fr'${variable_name} = {level}$')
+
+ return artists, labels
+
+ def _process_args(self, *args, **kwargs):
+ """
+ Process *args* and *kwargs*; override in derived classes.
+
+ Must set self.levels, self.zmin and self.zmax, and update axes limits.
+ """
+ self.levels = args[0]
+ self.allsegs = args[1]
+ self.allkinds = args[2] if len(args) > 2 else None
+ self.zmax = np.max(self.levels)
+ self.zmin = np.min(self.levels)
+
+ # Check lengths of levels and allsegs.
+ if self.filled:
+ if len(self.allsegs) != len(self.levels) - 1:
+ raise ValueError('must be one less number of segments as '
+ 'levels')
+ else:
+ if len(self.allsegs) != len(self.levels):
+ raise ValueError('must be same number of segments as levels')
+
+ # Check length of allkinds.
+ if (self.allkinds is not None and
+ len(self.allkinds) != len(self.allsegs)):
+ raise ValueError('allkinds has different length to allsegs')
+
+ # Determine x, y bounds and update axes data limits.
+ flatseglist = [s for seg in self.allsegs for s in seg]
+ points = np.concatenate(flatseglist, axis=0)
+ self._mins = points.min(axis=0)
+ self._maxs = points.max(axis=0)
+
+ return kwargs
+
+ def _get_allsegs_and_allkinds(self):
+ """Compute ``allsegs`` and ``allkinds`` using C extension."""
+ allsegs = []
+ allkinds = []
+ if self.filled:
+ lowers, uppers = self._get_lowers_and_uppers()
+ for level, level_upper in zip(lowers, uppers):
+ vertices, kinds = \
+ self._contour_generator.create_filled_contour(
+ level, level_upper)
+ allsegs.append(vertices)
+ allkinds.append(kinds)
+ else:
+ for level in self.levels:
+ vertices, kinds = self._contour_generator.create_contour(level)
+ allsegs.append(vertices)
+ allkinds.append(kinds)
+ return allsegs, allkinds
+
+ def _get_lowers_and_uppers(self):
+ """
+ Return ``(lowers, uppers)`` for filled contours.
+ """
+ lowers = self._levels[:-1]
+ if self.zmin == lowers[0]:
+ # Include minimum values in lowest interval
+ lowers = lowers.copy() # so we don't change self._levels
+ if self.logscale:
+ lowers[0] = 0.99 * self.zmin
+ else:
+ lowers[0] -= 1
+ uppers = self._levels[1:]
+ return (lowers, uppers)
+
+ def _make_paths(self, segs, kinds):
+ """
+ Create and return Path objects for the specified segments and optional
+ kind codes. segs is a list of numpy arrays, each array is either a
+ closed line loop or open line strip of 2D points with a shape of
+ (npoints, 2). kinds is either None or a list (with the same length as
+ segs) of numpy arrays, each array is of shape (npoints,) and contains
+ the kinds codes for the corresponding line in segs. If kinds is None
+ then the Path constructor creates the kind codes assuming that the line
+ is an open strip.
+ """
+ if kinds is None:
+ return [mpath.Path(seg) for seg in segs]
+ else:
+ return [mpath.Path(seg, codes=kind) for seg, kind
+ in zip(segs, kinds)]
+
    def changed(self):
        """Propagate colormap/norm/alpha changes to the contour artists."""
        if not hasattr(self, "cvalues"):
            # Just return after calling the super() changed function
            cm.ScalarMappable.changed(self)
            return
        # Force an autoscale immediately because self.to_rgba() calls
        # autoscale_None() internally with the data passed to it,
        # so if vmin/vmax are not set yet, this would override them with
        # content from *cvalues* rather than levels like we want
        self.norm.autoscale_None(self.levels)
        tcolors = [(tuple(rgba),)
                   for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
        self.tcolors = tcolors
        # Repeat the hatch sequence so zip() below never runs short.
        hatches = self.hatches * len(tcolors)
        for color, hatch, collection in zip(tcolors, hatches,
                                            self.collections):
            if self.filled:
                collection.set_facecolor(color)
                # update the collection's hatch (may be None)
                collection.set_hatch(hatch)
            else:
                collection.set_edgecolor(color)
        for label, cv in zip(self.labelTexts, self.labelCValues):
            label.set_alpha(self.alpha)
            label.set_color(self.labelMappable.to_rgba(cv))
        # add label colors
        cm.ScalarMappable.changed(self)
+
    def _autolev(self, N):
        """
        Select contour levels to span the data.

        The target number of levels, *N*, is used only when the
        scale is not log and default locator is used.

        We need two more levels for filled contours than for
        line contours, because for the latter we need to specify
        the lower and upper boundary of each range. For example,
        a single contour boundary, say at z = 0, requires only
        one contour line, but two filled regions, and therefore
        three levels to provide boundaries for both regions.
        """
        if self.locator is None:
            if self.logscale:
                self.locator = ticker.LogLocator()
            else:
                self.locator = ticker.MaxNLocator(N + 1, min_n_ticks=1)

        lev = self.locator.tick_values(self.zmin, self.zmax)

        # Locators that flag themselves as symmetric are used untrimmed.
        try:
            if self.locator._symmetric:
                return lev
        except AttributeError:
            pass

        # Trim excess levels the locator may have supplied.
        under = np.nonzero(lev < self.zmin)[0]
        i0 = under[-1] if len(under) else 0
        over = np.nonzero(lev > self.zmax)[0]
        i1 = over[0] + 1 if len(over) else len(lev)
        # Trim one additional level on each side covered by an extension.
        if self.extend in ('min', 'both'):
            i0 += 1
        if self.extend in ('max', 'both'):
            i1 -= 1

        # If trimming would leave fewer than 3 levels, keep them all.
        if i1 - i0 < 3:
            i0, i1 = 0, len(lev)

        return lev[i0:i1]
+
    def _process_contour_level_args(self, args):
        """
        Determine the contour levels and store in self.levels.
        """
        if self.levels is None:
            if len(args) == 0:
                levels_arg = 7  # Default, hard-wired.
            else:
                levels_arg = args[0]
        else:
            levels_arg = self.levels
        # An integral *levels* means "pick about this many levels";
        # anything else is taken as the explicit level values.
        if isinstance(levels_arg, Integral):
            self.levels = self._autolev(levels_arg)
        else:
            self.levels = np.asarray(levels_arg).astype(np.float64)

        if not self.filled:
            # Line contours: if no requested level lies strictly inside the
            # data range, fall back to a single level at zmin.
            inside = (self.levels > self.zmin) & (self.levels < self.zmax)
            levels_in = self.levels[inside]
            if len(levels_in) == 0:
                self.levels = [self.zmin]
                _api.warn_external(
                    "No contour levels were found within the data range.")

        if self.filled and len(self.levels) < 2:
            raise ValueError("Filled contours require at least 2 levels.")

        if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:
            raise ValueError("Contour levels must be increasing")
+
+ def _process_levels(self):
+ """
+ Assign values to :attr:`layers` based on :attr:`levels`,
+ adding extended layers as needed if contours are filled.
+
+ For line contours, layers simply coincide with levels;
+ a line is a thin layer. No extended levels are needed
+ with line contours.
+ """
+ # Make a private _levels to include extended regions; we
+ # want to leave the original levels attribute unchanged.
+ # (Colorbar needs this even for line contours.)
+ self._levels = list(self.levels)
+
+ if self.logscale:
+ lower, upper = 1e-250, 1e250
+ else:
+ lower, upper = -1e250, 1e250
+
+ if self.extend in ('both', 'min'):
+ self._levels.insert(0, lower)
+ if self.extend in ('both', 'max'):
+ self._levels.append(upper)
+ self._levels = np.asarray(self._levels)
+
+ if not self.filled:
+ self.layers = self.levels
+ return
+
+ # Layer values are mid-way between levels in screen space.
+ if self.logscale:
+ # Avoid overflow by taking sqrt before multiplying.
+ self.layers = (np.sqrt(self._levels[:-1])
+ * np.sqrt(self._levels[1:]))
+ else:
+ self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
+
    def _process_colors(self):
        """
        Color argument processing for contouring.

        Note that we base the colormapping on the contour levels
        and layers, not on the actual range of the Z values. This
        means we don't have to worry about bad values in Z, and we
        always have the full dynamic range available for the selected
        levels.

        The color is based on the midpoint of the layer, except for
        extended end layers. By default, the norm vmin and vmax
        are the extreme values of the non-extended levels. Hence,
        the layer color extremes are not the extreme values of
        the colormap itself, but approach those values as the number
        of levels increases. An advantage of this scheme is that
        line contours, when added to filled contours, take on
        colors that are consistent with those of the filled regions;
        for example, a contour line on the boundary between two
        regions will have a color intermediate between those
        of the regions.

        """
        self.monochrome = self.cmap.monochrome
        if self.colors is not None:
            # Generate integers for direct indexing.
            i0, i1 = 0, len(self.levels)
            if self.filled:
                i1 -= 1
            # Out of range indices for over and under:
            if self.extend in ('both', 'min'):
                i0 -= 1
            if self.extend in ('both', 'max'):
                i1 += 1
            self.cvalues = list(range(i0, i1))
            # NoNorm lets the integer cvalues index the colormap directly.
            self.set_norm(mcolors.NoNorm())
        else:
            self.cvalues = self.layers
        self.set_array(self.levels)
        self.autoscale_None()
        if self.extend in ('both', 'max', 'min'):
            self.norm.clip = False

        # self.tcolors are set by the "changed" method
+
+ def _process_linewidths(self):
+ linewidths = self.linewidths
+ Nlev = len(self.levels)
+ if linewidths is None:
+ default_linewidth = mpl.rcParams['contour.linewidth']
+ if default_linewidth is None:
+ default_linewidth = mpl.rcParams['lines.linewidth']
+ tlinewidths = [(default_linewidth,)] * Nlev
+ else:
+ if not np.iterable(linewidths):
+ linewidths = [linewidths] * Nlev
+ else:
+ linewidths = list(linewidths)
+ if len(linewidths) < Nlev:
+ nreps = int(np.ceil(Nlev / len(linewidths)))
+ linewidths = linewidths * nreps
+ if len(linewidths) > Nlev:
+ linewidths = linewidths[:Nlev]
+ tlinewidths = [(w,) for w in linewidths]
+ return tlinewidths
+
+ def _process_linestyles(self):
+ linestyles = self.linestyles
+ Nlev = len(self.levels)
+ if linestyles is None:
+ tlinestyles = ['solid'] * Nlev
+ if self.monochrome:
+ neg_ls = mpl.rcParams['contour.negative_linestyle']
+ eps = - (self.zmax - self.zmin) * 1e-15
+ for i, lev in enumerate(self.levels):
+ if lev < eps:
+ tlinestyles[i] = neg_ls
+ else:
+ if isinstance(linestyles, str):
+ tlinestyles = [linestyles] * Nlev
+ elif np.iterable(linestyles):
+ tlinestyles = list(linestyles)
+ if len(tlinestyles) < Nlev:
+ nreps = int(np.ceil(Nlev / len(linestyles)))
+ tlinestyles = tlinestyles * nreps
+ if len(tlinestyles) > Nlev:
+ tlinestyles = tlinestyles[:Nlev]
+ else:
+ raise ValueError("Unrecognized type for linestyles kwarg")
+ return tlinestyles
+
    def get_alpha(self):
        """Return the alpha blending value applied to all ContourSet artists."""
        return self.alpha
+
    def set_alpha(self, alpha):
        """
        Set the alpha blending value for all ContourSet artists.
        *alpha* must be between 0 (transparent) and 1 (opaque).
        """
        self.alpha = alpha
        # Re-apply colors so the new alpha takes effect on every artist.
        self.changed()
+
+ def find_nearest_contour(self, x, y, indices=None, pixel=True):
+ """
+ Find the point in the contour plot that is closest to ``(x, y)``.
+
+ This method does not support filled contours.
+
+ Parameters
+ ----------
+ x, y : float
+ The reference point.
+ indices : list of int or None, default: None
+ Indices of contour levels to consider. If None (the default), all
+ levels are considered.
+ pixel : bool, default: True
+ If *True*, measure distance in pixel (screen) space, which is
+ useful for manual contour labeling; else, measure distance in axes
+ space.
+
+ Returns
+ -------
+ contour : `.Collection`
+ The contour that is closest to ``(x, y)``.
+ segment : int
+ The index of the `.Path` in *contour* that is closest to
+ ``(x, y)``.
+ index : int
+ The index of the path segment in *segment* that is closest to
+ ``(x, y)``.
+ xmin, ymin : float
+ The point in the contour plot that is closest to ``(x, y)``.
+ d2 : float
+ The squared distance from ``(xmin, ymin)`` to ``(x, y)``.
+ """
+
+ # This function uses a method that is probably quite
+ # inefficient based on converting each contour segment to
+ # pixel coordinates and then comparing the given point to
+ # those coordinates for each contour. This will probably be
+ # quite slow for complex contours, but for normal use it works
+ # sufficiently well that the time is not noticeable.
+ # Nonetheless, improvements could probably be made.
+
+ if self.filled:
+ raise ValueError("Method does not support filled contours.")
+
+ if indices is None:
+ indices = range(len(self.collections))
+
+ d2min = np.inf
+ conmin = None
+ segmin = None
+ xmin = None
+ ymin = None
+
+ point = np.array([x, y])
+
+ for icon in indices:
+ con = self.collections[icon]
+ trans = con.get_transform()
+ paths = con.get_paths()
+
+ for segNum, linepath in enumerate(paths):
+ lc = linepath.vertices
+ # transfer all data points to screen coordinates if desired
+ if pixel:
+ lc = trans.transform(lc)
+
+ d2, xc, leg = _find_closest_point_on_path(lc, point)
+ if d2 < d2min:
+ d2min = d2
+ conmin = icon
+ segmin = segNum
+ imin = leg[1]
+ xmin = xc[0]
+ ymin = xc[1]
+
+ return (conmin, segmin, imin, xmin, ymin, d2min)
+
+
# Concrete ContourSet whose _process_args computes contour segments from
# data on a quadrilateral grid via the C extension contour generator.
@docstring.dedent_interpd
class QuadContourSet(ContourSet):
    """
    Create and store a set of contour lines or filled regions.

    This class is typically not instantiated directly by the user but by
    `~.Axes.contour` and `~.Axes.contourf`.

    %(contour_set_attributes)s
    """
+
    def _process_args(self, *args, corner_mask=None, **kwargs):
        """
        Process args and kwargs.

        Sets the data limits (``_mins``/``_maxs``) and the contour
        generator used to compute the contour segments.
        """
        if isinstance(args[0], QuadContourSet):
            # Reuse the levels, data range and contour generator of an
            # existing QuadContourSet.
            if self.levels is None:
                self.levels = args[0].levels
            self.zmin = args[0].zmin
            self.zmax = args[0].zmax
            self._corner_mask = args[0]._corner_mask
            contour_generator = args[0]._contour_generator
            self._mins = args[0]._mins
            self._maxs = args[0]._maxs
        else:
            import matplotlib._contour as _contour

            if corner_mask is None:
                corner_mask = mpl.rcParams['contour.corner_mask']
            self._corner_mask = corner_mask

            x, y, z = self._contour_args(args, kwargs)

            # Only pass a mask to the generator when z actually has one.
            _mask = ma.getmask(z)
            if _mask is ma.nomask or not _mask.any():
                _mask = None

            contour_generator = _contour.QuadContourGenerator(
                x, y, z.filled(), _mask, self._corner_mask, self.nchunk)

            t = self.get_transform()

            # if the transform is not trans data, and some part of it
            # contains transData, transform the xs and ys to data coordinates
            if (t != self.axes.transData and
                    any(t.contains_branch_seperately(self.axes.transData))):
                trans_to_data = t - self.axes.transData
                pts = np.vstack([x.flat, y.flat]).T
                transformed_pts = trans_to_data.transform(pts)
                x = transformed_pts[..., 0]
                y = transformed_pts[..., 1]

            self._mins = [ma.min(x), ma.min(y)]
            self._maxs = [ma.max(x), ma.max(y)]

        self._contour_generator = contour_generator

        return kwargs
+
+ def _contour_args(self, args, kwargs):
+ if self.filled:
+ fn = 'contourf'
+ else:
+ fn = 'contour'
+ Nargs = len(args)
+ if Nargs <= 2:
+ z = ma.asarray(args[0], dtype=np.float64)
+ x, y = self._initialize_x_y(z)
+ args = args[1:]
+ elif Nargs <= 4:
+ x, y, z = self._check_xyz(args[:3], kwargs)
+ args = args[3:]
+ else:
+ raise TypeError("Too many arguments to %s; see help(%s)" %
+ (fn, fn))
+ z = ma.masked_invalid(z, copy=False)
+ self.zmax = float(z.max())
+ self.zmin = float(z.min())
+ if self.logscale and self.zmin <= 0:
+ z = ma.masked_where(z <= 0, z)
+ _api.warn_external('Log scale: values of z <= 0 have been masked')
+ self.zmin = float(z.min())
+ self._process_contour_level_args(args)
+ return (x, y, z)
+
+ def _check_xyz(self, args, kwargs):
+ """
+ Check that the shapes of the input arrays match; if x and y are 1D,
+ convert them to 2D using meshgrid.
+ """
+ x, y = args[:2]
+ x, y = self.axes._process_unit_info([("x", x), ("y", y)], kwargs)
+
+ x = np.asarray(x, dtype=np.float64)
+ y = np.asarray(y, dtype=np.float64)
+ z = ma.asarray(args[2], dtype=np.float64)
+
+ if z.ndim != 2:
+ raise TypeError(f"Input z must be 2D, not {z.ndim}D")
+ if z.shape[0] < 2 or z.shape[1] < 2:
+ raise TypeError(f"Input z must be at least a (2, 2) shaped array, "
+ f"but has shape {z.shape}")
+ Ny, Nx = z.shape
+
+ if x.ndim != y.ndim:
+ raise TypeError(f"Number of dimensions of x ({x.ndim}) and y "
+ f"({y.ndim}) do not match")
+ if x.ndim == 1:
+ nx, = x.shape
+ ny, = y.shape
+ if nx != Nx:
+ raise TypeError(f"Length of x ({nx}) must match number of "
+ f"columns in z ({Nx})")
+ if ny != Ny:
+ raise TypeError(f"Length of y ({ny}) must match number of "
+ f"rows in z ({Ny})")
+ x, y = np.meshgrid(x, y)
+ elif x.ndim == 2:
+ if x.shape != z.shape:
+ raise TypeError(
+ f"Shapes of x {x.shape} and z {z.shape} do not match")
+ if y.shape != z.shape:
+ raise TypeError(
+ f"Shapes of y {y.shape} and z {z.shape} do not match")
+ else:
+ raise TypeError(f"Inputs x and y must be 1D or 2D, not {x.ndim}D")
+
+ return x, y, z
+
+ def _initialize_x_y(self, z):
+ """
+ Return X, Y arrays such that contour(Z) will match imshow(Z)
+ if origin is not None.
+ The center of pixel Z[i, j] depends on origin:
+ if origin is None, x = j, y = i;
+ if origin is 'lower', x = j + 0.5, y = i + 0.5;
+ if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
+ If extent is not None, x and y will be scaled to match,
+ as in imshow.
+ If origin is None and extent is not None, then extent
+ will give the minimum and maximum values of x and y.
+ """
+ if z.ndim != 2:
+ raise TypeError(f"Input z must be 2D, not {z.ndim}D")
+ elif z.shape[0] < 2 or z.shape[1] < 2:
+ raise TypeError(f"Input z must be at least a (2, 2) shaped array, "
+ f"but has shape {z.shape}")
+ else:
+ Ny, Nx = z.shape
+ if self.origin is None: # Not for image-matching.
+ if self.extent is None:
+ return np.meshgrid(np.arange(Nx), np.arange(Ny))
+ else:
+ x0, x1, y0, y1 = self.extent
+ x = np.linspace(x0, x1, Nx)
+ y = np.linspace(y0, y1, Ny)
+ return np.meshgrid(x, y)
+ # Match image behavior:
+ if self.extent is None:
+ x0, x1, y0, y1 = (0, Nx, 0, Ny)
+ else:
+ x0, x1, y0, y1 = self.extent
+ dx = (x1 - x0) / Nx
+ dy = (y1 - y0) / Ny
+ x = x0 + (np.arange(Nx) + 0.5) * dx
+ y = y0 + (np.arange(Ny) + 0.5) * dy
+ if self.origin == 'upper':
+ y = y[::-1]
+ return np.meshgrid(x, y)
+
+
+docstring.interpd.update(contour_doc="""
+`.contour` and `.contourf` draw contour lines and filled contours,
+respectively. Except as noted, function signatures and return values
+are the same for both versions.
+
+Parameters
+----------
+X, Y : array-like, optional
+ The coordinates of the values in *Z*.
+
+ *X* and *Y* must both be 2D with the same shape as *Z* (e.g.
+ created via `numpy.meshgrid`), or they must both be 1-D such
+ that ``len(X) == N`` is the number of columns in *Z* and
+ ``len(Y) == M`` is the number of rows in *Z*.
+
+ *X* and *Y* must both be ordered monotonically.
+
+ If not given, they are assumed to be integer indices, i.e.
+ ``X = range(N)``, ``Y = range(M)``.
+
+Z : (M, N) array-like
+ The height values over which the contour is drawn.
+
+levels : int or array-like, optional
+ Determines the number and positions of the contour lines / regions.
+
+ If an int *n*, use `~matplotlib.ticker.MaxNLocator`, which tries
+ to automatically choose no more than *n+1* "nice" contour levels
+ between *vmin* and *vmax*.
+
+ If array-like, draw contour lines at the specified levels.
+ The values must be in increasing order.
+
+Returns
+-------
+`~.contour.QuadContourSet`
+
+Other Parameters
+----------------
+corner_mask : bool, default: :rc:`contour.corner_mask`
+ Enable/disable corner masking, which only has an effect if *Z* is
+ a masked array. If ``False``, any quad touching a masked point is
+ masked out. If ``True``, only the triangular corners of quads
+ nearest those points are always masked out, other triangular
+ corners comprising three unmasked points are contoured as usual.
+
+colors : color string or sequence of colors, optional
+ The colors of the levels, i.e. the lines for `.contour` and the
+ areas for `.contourf`.
+
+ The sequence is cycled for the levels in ascending order. If the
+ sequence is shorter than the number of levels, it's repeated.
+
+ As a shortcut, single color strings may be used in place of
+ one-element lists, i.e. ``'red'`` instead of ``['red']`` to color
+ all levels with the same color. This shortcut does only work for
+ color strings, not for other ways of specifying colors.
+
+ By default (value *None*), the colormap specified by *cmap*
+ will be used.
+
+alpha : float, default: 1
+ The alpha blending value, between 0 (transparent) and 1 (opaque).
+
+cmap : str or `.Colormap`, default: :rc:`image.cmap`
+ A `.Colormap` instance or registered colormap name. The colormap
+ maps the level values to colors.
+
+ If both *colors* and *cmap* are given, an error is raised.
+
+norm : `~matplotlib.colors.Normalize`, optional
+ If a colormap is used, the `.Normalize` instance scales the level
+ values to the canonical colormap range [0, 1] for mapping to
+ colors. If not given, the default linear scaling is used.
+
+vmin, vmax : float, optional
+ If not *None*, either or both of these values will be supplied to
+ the `.Normalize` instance, overriding the default color scaling
+ based on *levels*.
+
+origin : {*None*, 'upper', 'lower', 'image'}, default: None
+ Determines the orientation and exact position of *Z* by specifying
+ the position of ``Z[0, 0]``. This is only relevant, if *X*, *Y*
+ are not given.
+
+ - *None*: ``Z[0, 0]`` is at X=0, Y=0 in the lower left corner.
+ - 'lower': ``Z[0, 0]`` is at X=0.5, Y=0.5 in the lower left corner.
+ - 'upper': ``Z[0, 0]`` is at X=N+0.5, Y=0.5 in the upper left
+ corner.
+ - 'image': Use the value from :rc:`image.origin`.
+
+extent : (x0, x1, y0, y1), optional
+ If *origin* is not *None*, then *extent* is interpreted as in
+ `.imshow`: it gives the outer pixel boundaries. In this case, the
+ position of Z[0, 0] is the center of the pixel, not a corner. If
+ *origin* is *None*, then (*x0*, *y0*) is the position of Z[0, 0],
+ and (*x1*, *y1*) is the position of Z[-1, -1].
+
+ This argument is ignored if *X* and *Y* are specified in the call
+ to contour.
+
+locator : ticker.Locator subclass, optional
+ The locator is used to determine the contour levels if they
+ are not given explicitly via *levels*.
+ Defaults to `~.ticker.MaxNLocator`.
+
+extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
+ Determines the ``contourf``-coloring of values that are outside the
+ *levels* range.
+
+ If 'neither', values outside the *levels* range are not colored.
+ If 'min', 'max' or 'both', color the values below, above or below
+ and above the *levels* range.
+
+ Values below ``min(levels)`` and above ``max(levels)`` are mapped
+ to the under/over values of the `.Colormap`. Note that most
+ colormaps do not have dedicated colors for these by default, so
+ that the over and under values are the edge values of the colormap.
+ You may want to set these values explicitly using
+ `.Colormap.set_under` and `.Colormap.set_over`.
+
+ .. note::
+
+ An existing `.QuadContourSet` does not get notified if
+ properties of its colormap are changed. Therefore, an explicit
+ call `.QuadContourSet.changed()` is needed after modifying the
+ colormap. The explicit call can be left out, if a colorbar is
+ assigned to the `.QuadContourSet` because it internally calls
+ `.QuadContourSet.changed()`.
+
+ Example::
+
+ x = np.arange(1, 10)
+ y = x.reshape(-1, 1)
+ h = x * y
+
+ cs = plt.contourf(h, levels=[10, 30, 50],
+ colors=['#808080', '#A0A0A0', '#C0C0C0'], extend='both')
+ cs.cmap.set_over('red')
+ cs.cmap.set_under('blue')
+ cs.changed()
+
+xunits, yunits : registered units, optional
+ Override axis units by specifying an instance of a
+ :class:`matplotlib.units.ConversionInterface`.
+
+antialiased : bool, optional
+ Enable antialiasing, overriding the defaults. For
+ filled contours, the default is *True*. For line contours,
+ it is taken from :rc:`lines.antialiased`.
+
+nchunk : int >= 0, optional
+ If 0, no subdivision of the domain. Specify a positive integer to
+ divide the domain into subdomains of *nchunk* by *nchunk* quads.
+ Chunking reduces the maximum length of polygons generated by the
+ contouring algorithm which reduces the rendering workload passed
+ on to the backend and also requires slightly less RAM. It can
+ however introduce rendering artifacts at chunk boundaries depending
+ on the backend, the *antialiased* flag and value of *alpha*.
+
+linewidths : float or array-like, default: :rc:`contour.linewidth`
+ *Only applies to* `.contour`.
+
+ The line width of the contour lines.
+
+ If a number, all levels will be plotted with this linewidth.
+
+ If a sequence, the levels in ascending order will be plotted with
+ the linewidths in the order specified.
+
+ If None, this falls back to :rc:`lines.linewidth`.
+
+linestyles : {*None*, 'solid', 'dashed', 'dashdot', 'dotted'}, optional
+ *Only applies to* `.contour`.
+
+ If *linestyles* is *None*, the default is 'solid' unless the lines
+ are monochrome. In that case, negative contours will take their
+ linestyle from :rc:`contour.negative_linestyle` setting.
+
+ *linestyles* can also be an iterable of the above strings
+ specifying a set of linestyles to be used. If this
+ iterable is shorter than the number of contour levels
+ it will be repeated as necessary.
+
+hatches : list[str], optional
+ *Only applies to* `.contourf`.
+
+ A list of cross hatch patterns to use on the filled areas.
+ If None, no hatching will be added to the contour.
+ Hatching is supported in the PostScript, PDF, SVG and Agg
+ backends only.
+
+data : indexable object, optional
+ DATA_PARAMETER_PLACEHOLDER
+
+Notes
+-----
+1. `.contourf` differs from the MATLAB version in that it does not draw
+ the polygon edges. To draw edges, add line contours with calls to
+ `.contour`.
+
+2. `.contourf` fills intervals that are closed at the top; that is, for
+ boundaries *z1* and *z2*, the filled region is::
+
+ z1 < Z <= z2
+
+ except for the lowest interval, which is closed on both sides (i.e.
+ it includes the lowest value).
+
3. `.contour` and `.contourf` use a `marching squares
   <https://en.wikipedia.org/wiki/Marching_squares>`_ algorithm to
+ compute contour locations. More information can be found in
+ the source ``src/_contour.h``.
+""")
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/dviread.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/dviread.py
new file mode 100644
index 0000000000000000000000000000000000000000..3207a01de8be18c167d594238de31eeb5dcb759d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/dviread.py
@@ -0,0 +1,1097 @@
+"""
+A module for reading dvi files output by TeX. Several limitations make
+this not (currently) useful as a general-purpose dvi preprocessor, but
+it is currently used by the pdf backend for processing usetex text.
+
+Interface::
+
+ with Dvi(filename, 72) as dvi:
+ # iterate over pages:
+ for page in dvi:
+ w, h, d = page.width, page.height, page.descent
+ for x, y, font, glyph, width in page.text:
+ fontname = font.texname
+ pointsize = font.size
+ ...
+ for x, y, height, width in page.boxes:
+ ...
+"""
+
+from collections import namedtuple
+import enum
+from functools import lru_cache, partial, wraps
+import logging
+import os
+from pathlib import Path
+import re
+import struct
+import subprocess
+import sys
+
+import numpy as np
+
+from matplotlib import _api, cbook
+
+_log = logging.getLogger(__name__)
+
+# Many dvi related files are looked for by external processes, require
+# additional parsing, and are used many times per rendering, which is why they
+# are cached using lru_cache().
+
+# Dvi is a bytecode format documented in
+# https://ctan.org/pkg/dvitype
+# https://texdoc.org/serve/dvitype.pdf/0
+#
+# The file consists of a preamble, some number of pages, a postamble,
+# and a finale. Different opcodes are allowed in different contexts,
+# so the Dvi object has a parser state:
+#
+# pre: expecting the preamble
+# outer: between pages (followed by a page or the postamble,
+# also e.g. font definitions are allowed)
+# page: processing a page
+# post_post: state after the postamble (our current implementation
+# just stops reading)
+# finale: the finale (unimplemented in our current implementation)
+
# Parser states used by the Dvi reader; see the overview comment above.
_dvistate = enum.Enum('DviState', 'pre outer inpage post_post finale')

# The marks on a page consist of text and boxes. A page also has dimensions.
Page = namedtuple('Page', 'text boxes height width descent')
# A single glyph placement: position, font, glyph number, and advance width.
Text = namedtuple('Text', 'x y font glyph width')
# A filled rectangle (a "rule" in TeX terms).
Box = namedtuple('Box', 'x y height width')
+
+
+# Opcode argument parsing
+#
+# Each of the following functions takes a Dvi object and delta,
+# which is the difference between the opcode and the minimum opcode
+# with the same meaning. Dvi opcodes often encode the number of
+# argument bytes in this delta.
+
+def _arg_raw(dvi, delta):
+ """Return *delta* without reading anything more from the dvi file."""
+ return delta
+
+
+def _arg(nbytes, signed, dvi, _):
+ """
+ Read *nbytes* bytes, returning the bytes interpreted as a signed integer
+ if *signed* is true, unsigned otherwise.
+ """
+ return dvi._arg(nbytes, signed)
+
+
+def _arg_slen(dvi, delta):
+ """
+ Read *delta* bytes, returning None if *delta* is zero, and the bytes
+ interpreted as a signed integer otherwise.
+ """
+ if delta == 0:
+ return None
+ return dvi._arg(delta, True)
+
+
+def _arg_slen1(dvi, delta):
+ """
+ Read *delta*+1 bytes, returning the bytes interpreted as signed.
+ """
+ return dvi._arg(delta + 1, True)
+
+
+def _arg_ulen1(dvi, delta):
+ """
+ Read *delta*+1 bytes, returning the bytes interpreted as unsigned.
+ """
+ return dvi._arg(delta + 1, False)
+
+
+def _arg_olen1(dvi, delta):
+ """
+ Read *delta*+1 bytes, returning the bytes interpreted as
+ unsigned integer for 0<=*delta*<3 and signed if *delta*==3.
+ """
+ return dvi._arg(delta + 1, delta == 3)
+
+
# Map the argument-spec strings accepted by _dispatch(args=...) to the
# reader callables above; the fixed-size specs are pre-bound via partial.
_arg_mapping = dict(raw=_arg_raw,
                    u1=partial(_arg, 1, False),
                    u4=partial(_arg, 4, False),
                    s4=partial(_arg, 4, True),
                    slen=_arg_slen,
                    olen1=_arg_olen1,
                    slen1=_arg_slen1,
                    ulen1=_arg_ulen1)
+
+
def _dispatch(table, min, max=None, state=None, args=('raw',)):
    """
    Decorator for dispatch by opcode. Sets the values in *table*
    from *min* to *max* to this method, adds a check that the Dvi state
    matches *state* if not None, reads arguments from the file according
    to *args*.

    Parameters
    ----------
    table : dict[int, callable]
        The dispatch table to be filled in.

    min, max : int
        Range of opcodes that calls the registered function; *max* defaults to
        *min*.

    state : _dvistate, optional
        State of the Dvi object in which these opcodes are allowed.

    args : list[str], default: ['raw']
        Sequence of argument specifications:

        - 'raw': opcode minus minimum
        - 'u1': read one unsigned byte
        - 'u4': read four bytes, treat as an unsigned number
        - 's4': read four bytes, treat as a signed number
        - 'slen': read (opcode - minimum) bytes, treat as signed
        - 'slen1': read (opcode - minimum + 1) bytes, treat as signed
        - 'ulen1': read (opcode - minimum + 1) bytes, treat as unsigned
        - 'olen1': read (opcode - minimum + 1) bytes, treat as unsigned
          if under four bytes, signed if four bytes
    """
    def decorate(method):
        # Resolve the argument-spec names to reader callables once, at
        # decoration time, so the per-opcode wrapper stays cheap.
        get_args = [_arg_mapping[x] for x in args]

        @wraps(method)
        def wrapper(self, byte):
            if state is not None and self.state != state:
                raise ValueError("state precondition failed")
            # Each reader may consume bytes from the dvi file; they run
            # left to right before the handler itself is invoked.
            return method(self, *[f(self, byte-min) for f in get_args])
        if max is None:
            table[min] = wrapper
        else:
            # Register the same wrapper for the whole opcode range; the
            # assert guards against two handlers claiming one opcode.
            for i in range(min, max+1):
                assert table[i] is None
                table[i] = wrapper
        return wrapper
    return decorate
+
+
class Dvi:
    """
    A reader for a dvi ("device-independent") file, as produced by TeX.

    The current implementation can only iterate through pages in order,
    and does not even attempt to verify the postamble.

    This class can be used as a context manager to close the underlying
    file upon exit. Pages can be read via iteration. Here is an overly
    simple way to extract text without trying to detect whitespace::

        >>> with matplotlib.dviread.Dvi('input.dvi', 72) as dvi:
        ...     for page in dvi:
        ...         print(''.join(chr(t.glyph) for t in page.text))
    """
    # dispatch table: opcode byte -> handler, filled in by the _dispatch
    # decorator applied to the methods below
    _dtable = [None] * 256
    _dispatch = partial(_dispatch, _dtable)

    def __init__(self, filename, dpi):
        """
        Read the data from the file named *filename* and convert
        TeX's internal units to units of *dpi* per inch.
        *dpi* only sets the units and does not limit the resolution.
        Use None to return TeX's internal units.
        """
        _log.debug('Dvi: %s', filename)
        self.file = open(filename, 'rb')
        self.dpi = dpi
        self.fonts = {}  # dvi font number -> DviFont
        self.state = _dvistate.pre

    # Deprecated public attribute; always None.
    baseline = _api.deprecated("3.5")(property(lambda self: None))

    def __enter__(self):
        """Context manager enter method, does nothing."""
        return self

    def __exit__(self, etype, evalue, etrace):
        """
        Context manager exit method, closes the underlying file if it is open.
        """
        self.close()

    def __iter__(self):
        """
        Iterate through the pages of the file.

        Yields
        ------
        Page
            Details of all the text and box objects on the page.
            The Page tuple contains lists of Text and Box tuples and
            the page dimensions, and the Text and Box tuples contain
            coordinates transformed into a standard Cartesian
            coordinate system at the dpi value given when initializing.
            The coordinates are floating point numbers, but otherwise
            precision is not lost and coordinate values are not clipped to
            integers.
        """
        while self._read():
            yield self._output()

    def close(self):
        """Close the underlying file if it is open."""
        if not self.file.closed:
            self.file.close()

    def _output(self):
        """
        Output the text and boxes belonging to the most recent page.
        page = dvi._output()
        """
        # Bounding box of everything on the page; maxy_pure ignores
        # glyph depth, so it tracks the lowest baseline instead of the
        # lowest descender.
        minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
        maxy_pure = -np.inf
        for elt in self.text + self.boxes:
            if isinstance(elt, Box):
                x, y, h, w = elt
                e = 0  # zero depth
            else:  # glyph
                x, y, font, g, w = elt
                h, e = font._height_depth_of(g)
            minx = min(minx, x)
            miny = min(miny, y - h)
            maxx = max(maxx, x + w)
            maxy = max(maxy, y + e)
            maxy_pure = max(maxy_pure, y)
        if self._baseline_v is not None:
            maxy_pure = self._baseline_v  # This should normally be the case.
            self._baseline_v = None

        if not self.text and not self.boxes:  # Avoid infs/nans from inf+/-inf.
            return Page(text=[], boxes=[], width=0, height=0, descent=0)

        if self.dpi is None:
            # special case for ease of debugging: output raw dvi coordinates
            return Page(text=self.text, boxes=self.boxes,
                        width=maxx-minx, height=maxy_pure-miny,
                        descent=maxy-maxy_pure)

        # convert from TeX's "scaled points" to dpi units
        d = self.dpi / (72.27 * 2**16)
        descent = (maxy - maxy_pure) * d

        # Flip to a y-up Cartesian system anchored at the page's
        # bounding box, shifting everything up by the descent.
        text = [Text((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
                for (x, y, f, g, w) in self.text]
        boxes = [Box((x-minx)*d, (maxy-y)*d - descent, h*d, w*d)
                 for (x, y, h, w) in self.boxes]

        return Page(text=text, boxes=boxes, width=(maxx-minx)*d,
                    height=(maxy_pure-miny)*d, descent=descent)

    def _read(self):
        """
        Read one page from the file. Return True if successful,
        False if there were no more pages.
        """
        # Pages appear to start with the sequence
        #   bop (begin of page)
        #   xxx comment
        #   # if using chemformula
        #   down
        #   push
        #     down
        #   # if using xcolor
        #   down
        #   push
        #     down (possibly multiple)
        #     push  <= here, v is the baseline position.
        # etc.
        # (dviasm is useful to explore this structure.)
        # Thus, we use the vertical position at the first time the stack depth
        # reaches 3, while at least three "downs" have been executed (excluding
        # those popped out (corresponding to the chemformula preamble)), as the
        # baseline (the "down" count is necessary to handle xcolor).
        down_stack = [0]
        self._baseline_v = None
        while True:
            byte = self.file.read(1)[0]
            self._dtable[byte](self, byte)
            # Track pushes/pops/downs by handler name to implement the
            # baseline heuristic described above.
            name = self._dtable[byte].__name__
            if name == "_push":
                down_stack.append(down_stack[-1])
            elif name == "_pop":
                down_stack.pop()
            elif name == "_down":
                down_stack[-1] += 1
            if (self._baseline_v is None
                    and len(getattr(self, "stack", [])) == 3
                    and down_stack[-1] >= 4):
                self._baseline_v = self.v
            if byte == 140:  # end of page
                return True
            if self.state is _dvistate.post_post:  # end of file
                self.close()
                return False

    def _arg(self, nbytes, signed=False):
        """
        Read and return an integer argument *nbytes* long.
        Signedness is determined by the *signed* keyword.
        """
        buf = self.file.read(nbytes)
        value = buf[0]
        if signed and value >= 0x80:
            # sign-extend from the leading (big-endian) byte
            value = value - 0x100
        for b in buf[1:]:
            value = 0x100*value + b
        return value

    @_dispatch(min=0, max=127, state=_dvistate.inpage)
    def _set_char_immediate(self, char):
        # Opcodes 0..127 typeset the character equal to the opcode
        # itself, then advance h by its width.
        self._put_char_real(char)
        self.h += self.fonts[self.f]._width_of(char)

    @_dispatch(min=128, max=131, state=_dvistate.inpage, args=('olen1',))
    def _set_char(self, char):
        self._put_char_real(char)
        self.h += self.fonts[self.f]._width_of(char)

    @_dispatch(132, state=_dvistate.inpage, args=('s4', 's4'))
    def _set_rule(self, a, b):
        # Rule of height *a* and width *b*; advances h by the width.
        self._put_rule_real(a, b)
        self.h += b

    @_dispatch(min=133, max=136, state=_dvistate.inpage, args=('olen1',))
    def _put_char(self, char):
        # Like _set_char but does not move the reference point.
        self._put_char_real(char)

    def _put_char_real(self, char):
        font = self.fonts[self.f]
        if font._vf is None:
            self.text.append(Text(self.h, self.v, font, char,
                                  font._width_of(char)))
        else:
            # Virtual font: expand the character into the glyphs and
            # boxes of its packet, scaled into the current font's units.
            scale = font._scale
            for x, y, f, g, w in font._vf[char].text:
                newf = DviFont(scale=_mul2012(scale, f._scale),
                               tfm=f._tfm, texname=f.texname, vf=f._vf)
                self.text.append(Text(self.h + _mul2012(x, scale),
                                      self.v + _mul2012(y, scale),
                                      newf, g, newf._width_of(g)))
            self.boxes.extend([Box(self.h + _mul2012(x, scale),
                                   self.v + _mul2012(y, scale),
                                   _mul2012(a, scale), _mul2012(b, scale))
                               for x, y, a, b in font._vf[char].boxes])

    @_dispatch(137, state=_dvistate.inpage, args=('s4', 's4'))
    def _put_rule(self, a, b):
        self._put_rule_real(a, b)

    def _put_rule_real(self, a, b):
        # Rules with non-positive height or width produce no output.
        if a > 0 and b > 0:
            self.boxes.append(Box(self.h, self.v, a, b))

    @_dispatch(138)
    def _nop(self, _):
        pass

    @_dispatch(139, state=_dvistate.outer, args=('s4',)*11)
    def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
        # Begin of page: reset dvi registers and per-page accumulators.
        self.state = _dvistate.inpage
        self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
        self.stack = []
        self.text = []          # list of Text objects
        self.boxes = []         # list of Box objects

    @_dispatch(140, state=_dvistate.inpage)
    def _eop(self, _):
        # End of page: discard the per-page registers.
        self.state = _dvistate.outer
        del self.h, self.v, self.w, self.x, self.y, self.z, self.stack

    @_dispatch(141, state=_dvistate.inpage)
    def _push(self, _):
        self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))

    @_dispatch(142, state=_dvistate.inpage)
    def _pop(self, _):
        self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()

    @_dispatch(min=143, max=146, state=_dvistate.inpage, args=('slen1',))
    def _right(self, b):
        self.h += b

    @_dispatch(min=147, max=151, state=_dvistate.inpage, args=('slen',))
    def _right_w(self, new_w):
        # A missing argument means "repeat the previous w movement".
        if new_w is not None:
            self.w = new_w
        self.h += self.w

    @_dispatch(min=152, max=156, state=_dvistate.inpage, args=('slen',))
    def _right_x(self, new_x):
        if new_x is not None:
            self.x = new_x
        self.h += self.x

    @_dispatch(min=157, max=160, state=_dvistate.inpage, args=('slen1',))
    def _down(self, a):
        self.v += a

    @_dispatch(min=161, max=165, state=_dvistate.inpage, args=('slen',))
    def _down_y(self, new_y):
        if new_y is not None:
            self.y = new_y
        self.v += self.y

    @_dispatch(min=166, max=170, state=_dvistate.inpage, args=('slen',))
    def _down_z(self, new_z):
        if new_z is not None:
            self.z = new_z
        self.v += self.z

    @_dispatch(min=171, max=234, state=_dvistate.inpage)
    def _fnt_num_immediate(self, k):
        # Opcodes 171..234 select fonts 0..63 directly.
        self.f = k

    @_dispatch(min=235, max=238, state=_dvistate.inpage, args=('olen1',))
    def _fnt_num(self, new_f):
        self.f = new_f

    @_dispatch(min=239, max=242, args=('ulen1',))
    def _xxx(self, datalen):
        # \special{...}: read and log the payload but otherwise ignore it.
        special = self.file.read(datalen)
        _log.debug(
            'Dvi._xxx: encountered special: %s',
            ''.join([chr(ch) if 32 <= ch < 127 else '<%02x>' % ch
                     for ch in special]))

    @_dispatch(min=243, max=246, args=('olen1', 'u4', 'u4', 'u4', 'u1', 'u1'))
    def _fnt_def(self, k, c, s, d, a, l):
        self._fnt_def_real(k, c, s, d, a, l)

    def _fnt_def_real(self, k, c, s, d, a, l):
        # The font name is the last *l* bytes of the (*a*+*l*)-byte
        # area+name field; the area part is discarded.
        n = self.file.read(a + l)
        fontname = n[-l:].decode('ascii')
        tfm = _tfmfile(fontname)
        if tfm is None:
            raise FileNotFoundError("missing font metrics file: %s" % fontname)
        # A checksum of 0 on either side means "do not check".
        if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
            raise ValueError('tfm checksum mismatch: %s' % n)

        vf = _vffile(fontname)

        self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)

    @_dispatch(247, state=_dvistate.pre, args=('u1', 'u4', 'u4', 'u4', 'u1'))
    def _pre(self, i, num, den, mag, k):
        self.file.read(k)  # comment in the dvi file
        if i != 2:
            raise ValueError("Unknown dvi format %d" % i)
        if num != 25400000 or den != 7227 * 2**16:
            raise ValueError("Nonstandard units in dvi file")
            # meaning: TeX always uses those exact values, so it
            # should be enough for us to support those
            # (There are 72.27 pt to an inch so 7227 pt =
            # 7227 * 2**16 sp to 100 in. The numerator is multiplied
            # by 10^5 to get units of 10**-7 meters.)
        if mag != 1000:
            raise ValueError("Nonstandard magnification in dvi file")
            # meaning: LaTeX seems to frown on setting \mag, so
            # I think we can assume this is constant
        self.state = _dvistate.outer

    @_dispatch(248, state=_dvistate.outer)
    def _post(self, _):
        self.state = _dvistate.post_post
        # TODO: actually read the postamble and finale?
        # currently post_post just triggers closing the file

    @_dispatch(249)
    def _post_post(self, _):
        raise NotImplementedError

    @_dispatch(min=250, max=255)
    def _malformed(self, offset):
        raise ValueError(f"unknown command: byte {250 + offset}")
+
+
class DviFont:
    """
    A font referenced from a DVI file.

    Instances know the font's TeX name and size, compare equal based on
    those two attributes only, and expose glyph widths in the same units
    as the AFM file.  The underscore-prefixed attributes are internal to
    dviread and excluded from comparison.

    The size is in Adobe points (converted from TeX points).

    Parameters
    ----------
    scale : float
        Factor by which the font is scaled from its natural size.
    tfm : Tfm
        TeX font metrics for this font.
    texname : bytes
        Name of the font as used internally by TeX and friends, as an
        ASCII bytestring.  This is usually very different from any
        external font names; `PsfontsMap` can be used to find the
        external name of the font.
    vf : Vf
        A TeX "virtual font" file, or None if this font is not virtual.

    Attributes
    ----------
    texname : bytes
    size : float
        Size of the font in Adobe points, converted from the slightly
        smaller TeX points.
    widths : list
        Widths of glyphs in glyph-space units, typically 1/1000ths of
        the point size.
    """
    __slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')

    def __init__(self, scale, tfm, texname, vf):
        _api.check_isinstance(bytes, texname=texname)
        self._scale = scale
        self._tfm = tfm
        self.texname = texname
        self._vf = vf
        # 72 Adobe points per inch vs. 72.27 TeX points; *scale* is in
        # units of 2**-16 points ("scaled points").
        self.size = scale * (72.0 / (72.27 * 2**16))
        widths = tfm.width
        # An empty metrics table yields no glyphs at all.
        nchars = max(widths) + 1 if widths else 0
        self.widths = [(1000 * widths.get(char, 0)) >> 20
                       for char in range(nchars)]

    def __eq__(self, other):
        # Only texname and size participate in equality.
        if type(self) != type(other):
            return False
        return self.texname == other.texname and self.size == other.size

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return f"<{type(self).__name__}: {self.texname}>"

    def _width_of(self, char):
        """Return the width of *char* in dvi units, 0 if unknown."""
        width = self._tfm.width.get(char, None)
        if width is None:
            _log.debug('No width for char %d in font %s.', char, self.texname)
            return 0
        return _mul2012(width, self._scale)

    def _height_depth_of(self, char):
        """Return the [height, depth] of *char* in dvi units."""
        hd = []
        for metric, name in ((self._tfm.height, "height"),
                             (self._tfm.depth, "depth")):
            value = metric.get(char, None)
            if value is None:
                _log.debug('No %s for char %d in font %s',
                           name, char, self.texname)
                hd.append(0)
            else:
                hd.append(_mul2012(value, self._scale))
        # cmsyXX (symbols font) glyph 0 ("minus") has a nonzero descent
        # so that TeX aligns equations properly
        # (https://tex.stackexchange.com/q/526103/)
        # but we actually care about the rasterization depth to align
        # the dvipng-generated images.
        if re.match(br'^cmsy\d+$', self.texname) and char == 0:
            hd[-1] = 0
        return hd
+
+
class Vf(Dvi):
    r"""
    A virtual font (\*.vf file) containing subroutines for dvi files.

    Parameters
    ----------
    filename : str or path-like

    Notes
    -----
    The virtual font format is a derivative of dvi:
    http://mirrors.ctan.org/info/knuth/virtual-fonts
    This class reuses some of the machinery of `Dvi`
    but replaces the `_read` loop and dispatch mechanism.

    Examples
    --------
    ::

        vf = Vf(filename)
        glyph = vf[code]
        glyph.text, glyph.boxes, glyph.width
    """

    def __init__(self, filename):
        super().__init__(filename, 0)
        try:
            self._first_font = None
            self._chars = {}
            self._read()
        finally:
            self.close()

    def __getitem__(self, code):
        """Return the stored `Page` (glyph packet) for character *code*."""
        return self._chars[code]

    def _read(self):
        """
        Read one page from the file. Return True if successful,
        False if there were no more pages.
        """
        packet_char, packet_ends = None, None
        packet_len, packet_width = None, None
        while True:
            byte = self.file.read(1)[0]
            # If we are in a packet, execute the dvi instructions
            if self.state is _dvistate.inpage:
                byte_at = self.file.tell()-1
                if byte_at == packet_ends:
                    self._finalize_packet(packet_char, packet_width)
                    packet_len, packet_char, packet_width = None, None, None
                    # fall through to out-of-packet code
                elif byte_at > packet_ends:
                    raise ValueError("Packet length mismatch in vf file")
                else:
                    if byte in (139, 140) or byte >= 243:
                        raise ValueError(
                            "Inappropriate opcode %d in vf file" % byte)
                    Dvi._dtable[byte](self, byte)
                    continue

            # We are outside a packet
            if byte < 242:          # a short packet (length given by byte)
                packet_len = byte
                packet_char, packet_width = self._arg(1), self._arg(3)
                packet_ends = self._init_packet(byte)
                self.state = _dvistate.inpage
            elif byte == 242:       # a long packet
                packet_len, packet_char, packet_width = \
                    [self._arg(x) for x in (4, 4, 4)]
                # Bug fix: record where the packet ends and enter the
                # in-page state, mirroring the short-packet branch above.
                # Previously _init_packet's return value was dropped and
                # the state was never switched, so the bytes of any packet
                # longer than 241 bytes were misparsed as top-level
                # vf opcodes.
                packet_ends = self._init_packet(packet_len)
                self.state = _dvistate.inpage
            elif 243 <= byte <= 246:  # fnt_def1..fnt_def4
                k = self._arg(byte - 242, byte == 246)
                c, s, d, a, l = [self._arg(x) for x in (4, 4, 4, 1, 1)]
                self._fnt_def_real(k, c, s, d, a, l)
                if self._first_font is None:
                    self._first_font = k
            elif byte == 247:       # preamble
                i, k = self._arg(1), self._arg(1)
                x = self.file.read(k)
                cs, ds = self._arg(4), self._arg(4)
                self._pre(i, x, cs, ds)
            elif byte == 248:       # postamble (just some number of 248s)
                break
            else:
                raise ValueError("Unknown vf opcode %d" % byte)

    def _init_packet(self, pl):
        """Reset the graphics state for a new packet; return its end offset."""
        if self.state != _dvistate.outer:
            raise ValueError("Misplaced packet in vf file")
        self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
        self.stack, self.text, self.boxes = [], [], []
        self.f = self._first_font
        return self.file.tell() + pl

    def _finalize_packet(self, packet_char, packet_width):
        """Store the finished packet as the glyph for *packet_char*."""
        self._chars[packet_char] = Page(
            text=self.text, boxes=self.boxes, width=packet_width,
            height=None, descent=None)
        self.state = _dvistate.outer

    def _pre(self, i, x, cs, ds):
        """Handle the preamble: validate format byte *i*, log comment *x*."""
        if self.state is not _dvistate.pre:
            raise ValueError("pre command in middle of vf file")
        if i != 202:
            raise ValueError("Unknown vf format %d" % i)
        if len(x):
            _log.debug('vf file comment: %s', x)
        self.state = _dvistate.outer
        # cs = checksum, ds = design size
+
+
+def _mul2012(num1, num2):
+ """Multiply two numbers in 20.12 fixed point format."""
+ # Separated into a function because >> has surprising precedence
+ return (num1*num2) >> 20
+
+
class Tfm:
    """
    A TeX Font Metric file.

    This implementation covers only the bare minimum needed by the Dvi class.

    Parameters
    ----------
    filename : str or path-like

    Attributes
    ----------
    checksum : int
        Used for verifying against the dvi file.
    design_size : int
        Design size of the font (unknown units)
    width, height, depth : dict
        Dimensions of each character, need to be scaled by the factor
        specified in the dvi file. These are dicts because indexing may
        not start from 0.
    """
    __slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')

    def __init__(self, filename):
        _log.debug('opening tfm file %s', filename)
        with open(filename, 'rb') as file:
            header1 = file.read(24)
            # Big-endian 16-bit header words; bytes 2..13 hold the fields we
            # need: header length lh, first/last char codes bc/ec, and the
            # lengths of the width/height/depth tables (in 4-byte words).
            # Bytes 0-1 (the file-length word, per the TFM format) and
            # 14-23 are skipped.
            lh, bc, ec, nw, nh, nd = struct.unpack('!6H', header1[2:14])
            _log.debug('lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d',
                       lh, bc, ec, nw, nh, nd)
            header2 = file.read(4*lh)
            self.checksum, self.design_size = struct.unpack('!2I', header2[:8])
            # there is also encoding information etc.
            char_info = file.read(4*(ec-bc+1))
            widths = struct.unpack(f'!{nw}i', file.read(4*nw))
            heights = struct.unpack(f'!{nh}i', file.read(4*nh))
            depths = struct.unpack(f'!{nd}i', file.read(4*nd))
            self.width, self.height, self.depth = {}, {}, {}
            for idx, char in enumerate(range(bc, ec+1)):
                # Each 4-byte char_info word packs table indices: byte 0 is
                # the width index; byte 1 holds the height index in its high
                # nibble and the depth index in its low nibble.
                byte0 = char_info[4*idx]
                byte1 = char_info[4*idx+1]
                self.width[char] = widths[byte0]
                self.height[char] = heights[byte1 >> 4]
                self.depth[char] = depths[byte1 & 0xf]
+
+
+PsFont = namedtuple('PsFont', 'texname psname effects encoding filename')
+
+
class PsfontsMap:
    """
    A psfonts.map formatted file, mapping TeX fonts to PS fonts.

    Parameters
    ----------
    filename : str or path-like

    Notes
    -----
    For historical reasons, TeX knows many Type-1 fonts by different
    names than the outside world. (For one thing, the names have to
    fit in eight characters.) Also, TeX's native fonts are not Type-1
    but Metafont, which is nontrivial to convert to PostScript except
    as a bitmap. While high-quality conversions to Type-1 format exist
    and are shipped with modern TeX distributions, we need to know
    which Type-1 fonts are the counterparts of which native fonts. For
    these reasons a mapping is needed from internal font names to font
    file names.

    A texmf tree typically includes mapping files called e.g.
    :file:`psfonts.map`, :file:`pdftex.map`, or :file:`dvipdfm.map`.
    The file :file:`psfonts.map` is used by :program:`dvips`,
    :file:`pdftex.map` by :program:`pdfTeX`, and :file:`dvipdfm.map`
    by :program:`dvipdfm`. :file:`psfonts.map` might avoid embedding
    the 35 PostScript fonts (i.e., have no filename for them, as in
    the Times-Bold example above), while the pdf-related files perhaps
    only avoid the "Base 14" pdf fonts. But the user may have
    configured these files differently.

    Examples
    --------
    >>> map = PsfontsMap(find_tex_file('pdftex.map'))
    >>> entry = map[b'ptmbo8r']
    >>> entry.texname
    b'ptmbo8r'
    >>> entry.psname
    b'Times-Bold'
    >>> entry.encoding
    '/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
    >>> entry.effects
    {'slant': 0.16700000000000001}
    >>> entry.filename
    """
    __slots__ = ('_filename', '_unparsed', '_parsed')

    # Create a filename -> PsfontsMap cache, so that calling
    # `PsfontsMap(filename)` with the same filename a second time immediately
    # returns the same object.
    @lru_cache()
    def __new__(cls, filename):
        self = object.__new__(cls)
        self._filename = os.fsdecode(filename)
        # Some TeX distributions have enormous pdftex.map files which would
        # take hundreds of milliseconds to parse, but it is easy enough to just
        # store the unparsed lines (keyed by the first word, which is the
        # texname) and parse them on-demand.
        with open(filename, 'rb') as file:
            self._unparsed = {}
            for line in file:
                tfmname = line.split(b' ', 1)[0]
                self._unparsed.setdefault(tfmname, []).append(line)
        self._parsed = {}
        return self

    def __getitem__(self, texname):
        # Parse any pending lines for this texname on first access; the
        # first line that parses into a usable entry wins.
        assert isinstance(texname, bytes)
        if texname in self._unparsed:
            for line in self._unparsed.pop(texname):
                if self._parse_and_cache_line(line):
                    break
        try:
            return self._parsed[texname]
        except KeyError:
            raise LookupError(
                f"An associated PostScript font (required by Matplotlib) "
                f"could not be found for TeX font {texname.decode('ascii')!r} "
                f"in {self._filename!r}; this problem can often be solved by "
                f"installing a suitable PostScript font package in your TeX "
                f"package manager") from None

    def _parse_and_cache_line(self, line):
        """
        Parse a line in the font mapping file.

        The format is (partially) documented at
        http://mirrors.ctan.org/systems/doc/pdftex/manual/pdftex-a.pdf
        https://tug.org/texinfohtml/dvips.html#psfonts_002emap
        Each line can have the following fields:

        - tfmname (first, only required field),
        - psname (defaults to tfmname, must come immediately after tfmname if
          present),
        - fontflags (integer, must come immediately after psname if present,
          ignored by us),
        - special (SlantFont and ExtendFont, only field that is double-quoted),
        - fontfile, encodingfile (optional, prefixed by <, <<, or <[; << always
          precedes a font, <[ always precedes an encoding, < can precede either
          but then an encoding file must have extension .enc; < and << also
          request different font subsetting behaviors but we ignore that; < can
          be separated from the filename by whitespace).

        special, fontfile, and encodingfile can appear in any order.

        Returns True when a `PsFont` was stored in ``self._parsed``, None
        when the line was skipped or rejected.
        """
        # If the map file specifies multiple encodings for a font, we
        # follow pdfTeX in choosing the last one specified. Such
        # entries are probably mistakes but they have occurred.
        # https://tex.stackexchange.com/q/10826/

        if not line or line.startswith((b" ", b"%", b"*", b";", b"#")):
            return
        tfmname = basename = special = encodingfile = fontfile = None
        is_subsetted = is_t1 = is_truetype = False
        # Tokens are either one double-quoted "special" string (group 1)
        # or a bare whitespace-delimited word (group 2).
        matches = re.finditer(br'"([^"]*)(?:"|$)|(\S+)', line)
        for match in matches:
            quoted, unquoted = match.groups()
            if unquoted:
                if unquoted.startswith(b"<<"):  # font
                    fontfile = unquoted[2:]
                elif unquoted.startswith(b"<["):  # encoding
                    encodingfile = unquoted[2:]
                elif unquoted.startswith(b"<"):  # font or encoding
                    word = (
                        # foo
                        unquoted[1:]
                        # < by itself => read the next word
                        # (advances the *same* finditer iterator)
                        or next(filter(None, next(matches).groups())))
                    if word.endswith(b".enc"):
                        encodingfile = word
                    else:
                        fontfile = word
                        is_subsetted = True
                elif tfmname is None:
                    tfmname = unquoted
                elif basename is None:
                    basename = unquoted
            elif quoted:
                special = quoted
        effects = {}
        if special:
            # The special string uses postfix syntax ("0.167 SlantFont"), so
            # scan right-to-left: the operand is the next token *after* the
            # operator in reversed order.
            words = reversed(special.split())
            for word in words:
                if word == b"SlantFont":
                    effects["slant"] = float(next(words))
                elif word == b"ExtendFont":
                    effects["extend"] = float(next(words))

        # Verify some properties of the line that would cause it to be ignored
        # otherwise.
        if fontfile is not None:
            if fontfile.endswith((b".ttf", b".ttc")):
                is_truetype = True
            elif not fontfile.endswith(b".otf"):
                is_t1 = True
        elif basename is not None:
            is_t1 = True
        if is_truetype and is_subsetted and encodingfile is None:
            return
        if not is_t1 and ("slant" in effects or "extend" in effects):
            return
        if abs(effects.get("slant", 0)) > 1:
            return
        if abs(effects.get("extend", 0)) > 2:
            return

        if basename is None:
            basename = tfmname
        if encodingfile is not None:
            encodingfile = find_tex_file(encodingfile)
        if fontfile is not None:
            fontfile = find_tex_file(fontfile)
        self._parsed[tfmname] = PsFont(
            texname=tfmname, psname=basename, effects=effects,
            encoding=encodingfile, filename=fontfile)
        return True
+
+
+def _parse_enc(path):
+ r"""
+ Parse a \*.enc file referenced from a psfonts.map style file.
+
+ The format supported by this function is a tiny subset of PostScript.
+
+ Parameters
+ ----------
+ path : os.PathLike
+
+ Returns
+ -------
+ list
+ The nth entry of the list is the PostScript glyph name of the nth
+ glyph.
+ """
+ no_comments = re.sub("%.*", "", Path(path).read_text(encoding="ascii"))
+ array = re.search(r"(?s)\[(.*)\]", no_comments).group(1)
+ lines = [line for line in array.split() if line]
+ if all(line.startswith("/") for line in lines):
+ return [line[1:] for line in lines]
+ else:
+ raise ValueError(
+ "Failed to parse {} as Postscript encoding".format(path))
+
+
class _LuatexKpsewhich:
    """
    File lookup through a persistent ``luatex`` subprocess running
    ``kpsewhich.lua``, avoiding the cost of spawning one process per query.
    """
    @lru_cache()  # A singleton.
    def __new__(cls):
        self = object.__new__(cls)
        self._proc = self._new_proc()
        return self

    def _new_proc(self):
        """Spawn the long-lived luatex helper process."""
        return subprocess.Popen(
            ["luatex", "--luaonly",
             str(cbook._get_data_path("kpsewhich.lua"))],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def search(self, filename):
        """Return the path to *filename*, or "" if it is not found."""
        if self._proc.poll() is not None:  # Dead, restart it.
            self._proc = self._new_proc()
        # One query per line; the helper answers one line per query,
        # printing "nil" when nothing was found.
        self._proc.stdin.write(os.fsencode(filename) + b"\n")
        self._proc.stdin.flush()
        out = self._proc.stdout.readline().rstrip()
        return "" if out == b"nil" else os.fsdecode(out)
+
+
@lru_cache()
@_api.delete_parameter("3.5", "format")
def find_tex_file(filename, format=None):
    """
    Find a file in the texmf tree.

    Calls :program:`kpsewhich` which is an interface to the kpathsea
    library [1]_. Most existing TeX distributions on Unix-like systems use
    kpathsea. It is also available as part of MikTeX, a popular
    distribution on Windows.

    *If the file is not found, an empty string is returned*.

    Parameters
    ----------
    filename : str or path-like
    format : str or bytes
        Used as the value of the ``--format`` option to :program:`kpsewhich`.
        Could be e.g. 'tfm' or 'vf' to limit the search to that type of files.
        Deprecated.

    References
    ----------
    .. [1] `Kpathsea documentation `_
        The library that :program:`kpsewhich` is part of.
    """

    # we expect these to always be ascii encoded, but use utf-8
    # out of caution
    if isinstance(filename, bytes):
        filename = filename.decode('utf-8', errors='replace')
    if isinstance(format, bytes):
        format = format.decode('utf-8', errors='replace')

    if format is None:
        # Prefer the persistent luatex helper; fall back to a one-shot
        # kpsewhich call if luatex is not installed.
        try:
            lk = _LuatexKpsewhich()
        except FileNotFoundError:
            pass  # Fallback to directly calling kpsewhich, as below.
        else:
            return lk.search(filename)

    if os.name == 'nt':
        # On Windows only, kpathsea can use utf-8 for cmd args and output.
        # The `command_line_encoding` environment variable is set to force it
        # to always use utf-8 encoding. See Matplotlib issue #11848.
        kwargs = {'env': {**os.environ, 'command_line_encoding': 'utf-8'},
                  'encoding': 'utf-8'}
    else:  # On POSIX, run through the equivalent of os.fsdecode().
        kwargs = {'encoding': sys.getfilesystemencoding(),
                  # Bug fix: the handler is spelled "surrogateescape";
                  # "surrogatescape" is not a registered codec error handler
                  # and would raise LookupError on any undecodable output
                  # instead of round-tripping it like os.fsdecode() does.
                  'errors': 'surrogateescape'}

    cmd = ['kpsewhich']
    if format is not None:
        cmd += ['--format=' + format]
    cmd += [filename]
    try:
        result = cbook._check_and_log_subprocess(cmd, _log, **kwargs)
    except (FileNotFoundError, RuntimeError):
        return ''
    return result.rstrip('\n')
+
+
@lru_cache()
def _fontfile(cls, suffix, texname):
    """Find ``texname + suffix`` via kpsewhich and load it with *cls*.

    Returns None when the file is not found in the texmf tree.
    """
    filename = find_tex_file(texname + suffix)
    return cls(filename) if filename else None
+
+
# Cached loaders for TeX font metric (.tfm) and virtual font (.vf) files.
_tfmfile = partial(_fontfile, Tfm, ".tfm")
_vffile = partial(_fontfile, Vf, ".vf")
+
+
if __name__ == '__main__':
    # Debugging helper: dump the text and boxes of each page of a dvi file.
    from argparse import ArgumentParser
    import itertools

    parser = ArgumentParser()
    parser.add_argument("filename")
    parser.add_argument("dpi", nargs="?", type=float, default=None)
    args = parser.parse_args()
    with Dvi(args.filename, args.dpi) as dvi:
        fontmap = PsfontsMap(find_tex_file('pdftex.map'))
        for page in dvi:
            print(f"=== new page === "
                  f"(w: {page.width}, h: {page.height}, d: {page.descent})")
            # Group consecutive glyphs by font so each font header is
            # printed only once per run.
            for font, group in itertools.groupby(
                    page.text, lambda text: text.font):
                print(f"font: {font.texname.decode('latin-1')!r}\t"
                      f"scale: {font._scale / 2 ** 20}")
                print("x", "y", "glyph", "chr", "w", "(glyphs)", sep="\t")
                for text in group:
                    print(text.x, text.y, text.glyph,
                          chr(text.glyph) if chr(text.glyph).isprintable()
                          else ".",
                          text.width, sep="\t")
            if page.boxes:
                print("x", "y", "w", "h", "", "(boxes)", sep="\t")
                for x, y, w, h in page.boxes:
                    print(x, y, w, h, sep="\t")
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/hatch.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/hatch.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c836cbf3cb983e39c22f2e944a3ed01397670d6
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/hatch.py
@@ -0,0 +1,225 @@
+"""Contains classes for generating hatch patterns."""
+
+import numpy as np
+
+from matplotlib import _api
+from matplotlib.path import Path
+
+
class HatchPatternBase:
    """
    The base class for a hatch pattern.

    Subclasses count the primitives requested by the hatch string
    (exposing ``num_vertices``) and fill pre-allocated vertex/code arrays
    through ``set_vertices_and_codes``.
    """
    pass
+
+
class HorizontalHatch(HatchPatternBase):
    """Horizontal lines, one set per '-' or '+' in the hatch string."""

    def __init__(self, hatch, density):
        self.num_lines = int((hatch.count('-') + hatch.count('+')) * density)
        self.num_vertices = 2 * self.num_lines

    def set_vertices_and_codes(self, vertices, codes):
        # Evenly spaced y-positions, shifted by half a step so the lines
        # sit inside the unit square instead of on its edges.
        positions, step = np.linspace(0.0, 1.0, self.num_lines,
                                      endpoint=False, retstep=True)
        positions += step / 2.
        vertices[0::2, 0] = 0.0
        vertices[0::2, 1] = positions
        vertices[1::2, 0] = 1.0
        vertices[1::2, 1] = positions
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
+
+
class VerticalHatch(HatchPatternBase):
    """Vertical lines, one set per '|' or '+' in the hatch string."""

    def __init__(self, hatch, density):
        self.num_lines = int((hatch.count('|') + hatch.count('+')) * density)
        self.num_vertices = 2 * self.num_lines

    def set_vertices_and_codes(self, vertices, codes):
        # Evenly spaced x-positions, shifted by half a step so the lines
        # sit inside the unit square instead of on its edges.
        positions, step = np.linspace(0.0, 1.0, self.num_lines,
                                      endpoint=False, retstep=True)
        positions += step / 2.
        vertices[0::2, 0] = positions
        vertices[0::2, 1] = 0.0
        vertices[1::2, 0] = positions
        vertices[1::2, 1] = 1.0
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
+
+
class NorthEastHatch(HatchPatternBase):
    """Diagonal lines rising to the right, for '/', 'x', and 'X'."""

    def __init__(self, hatch, density):
        count = hatch.count('/') + hatch.count('x') + hatch.count('X')
        self.num_lines = int(count * density)
        self.num_vertices = (self.num_lines + 1) * 2 if self.num_lines else 0

    def set_vertices_and_codes(self, vertices, codes):
        # Each line runs from (offset, -offset) to (1+offset, 1-offset),
        # sliding the unit diagonal across the square.
        offsets = np.linspace(-0.5, 0.5, self.num_lines + 1)
        vertices[0::2, 0] = offsets
        vertices[0::2, 1] = -offsets
        vertices[1::2, 0] = 1.0 + offsets
        vertices[1::2, 1] = 1.0 - offsets
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
+
+
class SouthEastHatch(HatchPatternBase):
    """Diagonal lines falling to the right, for '\\', 'x', and 'X'."""

    def __init__(self, hatch, density):
        count = hatch.count('\\') + hatch.count('x') + hatch.count('X')
        self.num_lines = int(count * density)
        self.num_vertices = (self.num_lines + 1) * 2 if self.num_lines else 0

    def set_vertices_and_codes(self, vertices, codes):
        # Each line runs from (offset, 1+offset) to (1+offset, offset),
        # sliding the anti-diagonal across the square.
        offsets = np.linspace(-0.5, 0.5, self.num_lines + 1)
        vertices[0::2, 0] = offsets
        vertices[0::2, 1] = 1.0 + offsets
        vertices[1::2, 0] = 1.0 + offsets
        vertices[1::2, 1] = offsets
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
+
+
class Shapes(HatchPatternBase):
    """
    Base class for hatches built by tiling a single marker shape over the
    unit square.

    Subclasses must set ``num_rows``, ``size``, ``shape_vertices`` and
    ``shape_codes`` before delegating to this initializer.
    """
    filled = False  # Outline-only by default; filled subclasses override.

    def __init__(self, hatch, density):
        if self.num_rows == 0:
            self.num_shapes = 0
            self.num_vertices = 0
        else:
            # Rows alternate between full rows of num_rows + 1 shapes and
            # staggered rows of num_rows shapes (see set_vertices_and_codes).
            self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
                               (self.num_rows // 2) * self.num_rows)
            # Unfilled shapes emit their outline twice (see below), hence
            # the factor of 2.
            self.num_vertices = (self.num_shapes *
                                 len(self.shape_vertices) *
                                 (1 if self.filled else 2))

    def set_vertices_and_codes(self, vertices, codes):
        offset = 1.0 / self.num_rows
        shape_vertices = self.shape_vertices * offset * self.size
        shape_codes = self.shape_codes
        if not self.filled:
            # Retrace the outline in reverse at 0.9 scale — presumably to
            # give the unfilled marker a ring-like, visible outline;
            # TODO confirm rendering intent.
            shape_vertices = np.concatenate(  # Forward, then backward.
                [shape_vertices, shape_vertices[::-1] * 0.9])
            shape_codes = np.concatenate([shape_codes, shape_codes])
        vertices_parts = []
        codes_parts = []
        for row in range(self.num_rows + 1):
            if row % 2 == 0:
                cols = np.linspace(0, 1, self.num_rows + 1)
            else:
                # Odd rows are staggered by half a column offset.
                cols = np.linspace(offset / 2, 1 - offset / 2, self.num_rows)
            row_pos = row * offset
            for col_pos in cols:
                vertices_parts.append(shape_vertices + [col_pos, row_pos])
                codes_parts.append(shape_codes)
        # Write directly into the caller-provided output arrays.
        np.concatenate(vertices_parts, out=vertices)
        np.concatenate(codes_parts, out=codes)
+
+
class Circles(Shapes):
    """Shared setup for circle hatches: tile the unit circle path."""

    def __init__(self, hatch, density):
        circle = Path.unit_circle()
        self.shape_vertices = circle.vertices
        self.shape_codes = circle.codes
        super().__init__(hatch, density)
+
+
class SmallCircles(Circles):
    """Small unfilled circles, one row set per 'o' in the hatch string."""
    size = 0.2

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('o')) * density
        super().__init__(hatch, density)
+
+
class LargeCircles(Circles):
    """Large unfilled circles, one row set per 'O' in the hatch string."""
    size = 0.35

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('O')) * density
        super().__init__(hatch, density)
+
+
class SmallFilledCircles(Circles):
    """Small filled circles (dots), one row set per '.' in the hatch string."""
    size = 0.1
    filled = True

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('.')) * density
        super().__init__(hatch, density)
+
+
class Stars(Shapes):
    """Filled five-pointed stars, one row set per '*' in the hatch string."""
    size = 1.0 / 3.0
    filled = True

    def __init__(self, hatch, density):
        self.num_rows = (hatch.count('*')) * density
        path = Path.unit_regular_star(5)
        self.shape_vertices = path.vertices
        # Build the codes by hand: one MOVETO followed by LINETOs, drawing
        # the star as a single polyline.
        self.shape_codes = np.full(len(self.shape_vertices), Path.LINETO,
                                   dtype=Path.code_type)
        self.shape_codes[0] = Path.MOVETO
        super().__init__(hatch, density)
+
# All known hatch primitives; get_path() instantiates each one against the
# hatch string and concatenates their vertices in this order.
_hatch_types = [
    HorizontalHatch,
    VerticalHatch,
    NorthEastHatch,
    SouthEastHatch,
    SmallCircles,
    LargeCircles,
    SmallFilledCircles,
    Stars
    ]
+
+
def _validate_hatch_pattern(hatch):
    """Emit a deprecation warning if *hatch* contains unknown characters."""
    valid_hatch_patterns = set(r'-+|/\xXoO.*')
    if hatch is None:
        return
    invalids = set(hatch) - valid_hatch_patterns
    if not invalids:
        return
    valid = ''.join(sorted(valid_hatch_patterns))
    invalids = ''.join(sorted(invalids))
    _api.warn_deprecated(
        '3.4',
        removal='3.7',  # one release after custom hatches (#20690)
        message=f'hatch must consist of a string of "{valid}" or '
                'None, but found the following invalid values '
                f'"{invalids}". Passing invalid values is deprecated '
                'since %(since)s and will become an error %(removal)s.'
    )
+
+
def get_path(hatchpattern, density=6):
    """
    Given a hatch specifier, *hatchpattern*, generates Path to render
    the hatch in a unit square. *density* is the number of lines per
    unit square.

    Parameters
    ----------
    hatchpattern : str
        Hatch characters, e.g. ``'/+o'``; each character's count scales
        the corresponding primitive's density.
    density : int, default: 6
        Number of lines per unit square.

    Returns
    -------
    Path
        Compound path of all requested primitives; an empty path if the
        string requests none.
    """
    density = int(density)

    patterns = [hatch_type(hatchpattern, density)
                for hatch_type in _hatch_types]
    # Generator expression: no need to materialize a list just to sum it.
    num_vertices = sum(pattern.num_vertices for pattern in patterns)

    if num_vertices == 0:
        return Path(np.empty((0, 2)))

    vertices = np.empty((num_vertices, 2))
    codes = np.empty(num_vertices, Path.code_type)

    # Let each primitive fill its own slice of the shared arrays in place.
    cursor = 0
    for pattern in patterns:
        if pattern.num_vertices != 0:
            vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]
            codes_chunk = codes[cursor:cursor + pattern.num_vertices]
            pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)
            cursor += pattern.num_vertices

    return Path(vertices, codes)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/mlab.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/mlab.py
new file mode 100644
index 0000000000000000000000000000000000000000..09b6c6581fbaca1e4c1bebbd55f5659ae468baa4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/mlab.py
@@ -0,0 +1,986 @@
+"""
+Numerical Python functions written for compatibility with MATLAB
+commands with the same names. Most numerical Python functions can be found in
+the `NumPy`_ and `SciPy`_ libraries. What remains here is code for performing
+spectral computations and kernel density estimations.
+
+.. _NumPy: https://numpy.org
+.. _SciPy: https://www.scipy.org
+
+Spectral functions
+------------------
+
+`cohere`
+ Coherence (normalized cross spectral density)
+
+`csd`
+ Cross spectral density using Welch's average periodogram
+
+`detrend`
+ Remove the mean or best fit line from an array
+
+`psd`
+ Power spectral density using Welch's average periodogram
+
+`specgram`
+ Spectrogram (spectrum over segments of time)
+
+`complex_spectrum`
+ Return the complex-valued frequency spectrum of a signal
+
+`magnitude_spectrum`
+ Return the magnitude of the frequency spectrum of a signal
+
+`angle_spectrum`
+ Return the angle (wrapped phase) of the frequency spectrum of a signal
+
+`phase_spectrum`
+ Return the phase (unwrapped angle) of the frequency spectrum of a signal
+
+`detrend_mean`
+ Remove the mean from a line.
+
+`detrend_linear`
+ Remove the best fit line from a line.
+
+`detrend_none`
+ Return the original line.
+
+`stride_windows`
+ Get all windows in an array in a memory-efficient manner
+"""
+
+import functools
+from numbers import Number
+
+import numpy as np
+
+from matplotlib import _api
+import matplotlib.cbook as cbook
+from matplotlib import docstring
+
+
def window_hanning(x):
    """
    Return *x* multiplied elementwise by a Hanning window of the same length.

    See Also
    --------
    window_none : Another window algorithm.
    """
    taper = np.hanning(len(x))
    return x * taper
+
+
def window_none(x):
    """
    No window function; return *x* unchanged.

    See Also
    --------
    window_hanning : Another window algorithm.
    """
    return x
+
+
def detrend(x, key=None, axis=None):
    """
    Return x with its trend removed.

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data.

    key : {'default', 'constant', 'mean', 'linear', 'none'} or function
        The detrending algorithm to use. 'default', 'mean', and 'constant' are
        the same as `detrend_mean`. 'linear' is the same as `detrend_linear`.
        'none' is the same as `detrend_none`. The default is 'mean'. See the
        corresponding functions for more details regarding the algorithms. Can
        also be a function that carries out the detrend operation.

    axis : int
        The axis along which to do the detrending.

    Raises
    ------
    ValueError
        If *key* is not one of the recognized strings or a callable, or if
        *axis* is out of bounds for *x*.

    See Also
    --------
    detrend_mean : Implementation of the 'mean' algorithm.
    detrend_linear : Implementation of the 'linear' algorithm.
    detrend_none : Implementation of the 'none' algorithm.
    """
    if key is None or key in ['constant', 'mean', 'default']:
        return detrend(x, key=detrend_mean, axis=axis)
    elif key == 'linear':
        return detrend(x, key=detrend_linear, axis=axis)
    elif key == 'none':
        return detrend(x, key=detrend_none, axis=axis)
    elif callable(key):
        x = np.asarray(x)
        if axis is not None and axis + 1 > x.ndim:
            raise ValueError(f'axis(={axis}) out of bounds')
        if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
            return key(x)
        # try to use the 'axis' argument if the function supports it,
        # otherwise use apply_along_axis to do it
        try:
            return key(x, axis=axis)
        except TypeError:
            return np.apply_along_axis(key, axis=axis, arr=x)
    else:
        # Bug fix: the message previously omitted 'none', which *is* an
        # accepted key (handled above).
        raise ValueError(
            f"Unknown value for key: {key!r}, must be one of: 'default', "
            f"'constant', 'mean', 'linear', 'none', or a function")
+
+
def detrend_mean(x, axis=None):
    """
    Return x minus the mean(x).

    Parameters
    ----------
    x : array or sequence
        Array or sequence containing the data
        Can have any dimensionality

    axis : int
        The axis along which to take the mean. See numpy.mean for a
        description of this argument.

    See Also
    --------
    detrend_linear : Another detrend algorithm.
    detrend_none : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    arr = np.asarray(x)
    # axis + 1 > ndim  <=>  axis >= ndim (negative axes always pass).
    if axis is not None and axis >= arr.ndim:
        raise ValueError('axis(=%s) out of bounds' % axis)
    # keepdims makes the mean broadcast back against the original shape.
    return arr - arr.mean(axis, keepdims=True)
+
+
def detrend_none(x, axis=None):
    """
    Return *x* unchanged: no detrending.

    Parameters
    ----------
    x : any object
        An object containing the data

    axis : int
        This parameter is ignored.
        It is included for compatibility with detrend_mean

    See Also
    --------
    detrend_mean : Another detrend algorithm.
    detrend_linear : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    return x
+
+
def detrend_linear(y):
    """
    Return x minus best fit line; 'linear' detrending.

    Parameters
    ----------
    y : 0-D or 1-D array or sequence
        Array or sequence containing the data

    See Also
    --------
    detrend_mean : Another detrend algorithm.
    detrend_none : Another detrend algorithm.
    detrend : A wrapper around all the detrend algorithms.
    """
    # This is faster than an algorithm based on linalg.lstsq.
    y = np.asarray(y)

    if y.ndim > 1:
        raise ValueError('y cannot have ndim > 1')

    # short-circuit 0-D array.
    if y.ndim == 0:
        return np.array(0., dtype=y.dtype)

    t = np.arange(y.size, dtype=float)

    # Slope from the covariance of (t, y); intercept from the means.
    covariance = np.cov(t, y, bias=1)
    slope = covariance[0, 1] / covariance[0, 0]
    intercept = y.mean() - slope * t.mean()
    return y - (slope * t + intercept)
+
+
def stride_windows(x, n, noverlap=None, axis=0):
    """
    Get all windows of x with length n as a single array,
    using strides to avoid data duplication.

    .. warning::

        It is not safe to write to the output array. Multiple
        elements may point to the same piece of memory,
        so modifying one value may change others.

    Parameters
    ----------
    x : 1D array or sequence
        Array or sequence containing the data.
    n : int
        The number of data points in each window.
    noverlap : int, default: 0 (no overlap)
        The overlap between adjacent windows.
    axis : int
        The axis along which the windows will run.

    References
    ----------
    `stackoverflow: Rolling window for 1D arrays in Numpy?
    `_
    `stackoverflow: Using strides for an efficient moving average filter
    `_
    """
    if noverlap is None:
        noverlap = 0

    if noverlap >= n:
        raise ValueError('noverlap must be less than n')
    if n < 1:
        raise ValueError('n cannot be less than 1')

    x = np.asarray(x)

    if x.ndim != 1:
        raise ValueError('only 1-dimensional arrays can be used')
    if n == 1 and noverlap == 0:
        # Trivial case: every sample is its own window.
        return x[np.newaxis] if axis == 0 else x[np.newaxis].transpose()
    if n > x.size:
        raise ValueError('n cannot be greater than the length of x')

    # np.lib.stride_tricks.as_strided easily leads to memory corruption for
    # non integer shape and strides, i.e. noverlap or n. See #3845.
    noverlap = int(noverlap)
    n = int(n)

    step = n - noverlap
    n_windows = (x.shape[-1] - noverlap) // step
    elem = x.strides[0]
    if axis == 0:
        shape, strides = (n, n_windows), (elem, step * elem)
    else:
        shape, strides = (n_windows, n), (step * elem, elem)
    return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
+
+
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
                     window=None, noverlap=None, pad_to=None,
                     sides=None, scale_by_freq=None, mode=None):
    """
    Private helper implementing the common parts between the psd, csd,
    spectrogram and complex, magnitude, angle, and phase spectrums.

    Returns ``(result, freqs, t)``: *result* is a (numFreqs, numSegments)
    array of the requested spectrum for each windowed segment, *freqs* is
    the frequency axis, and *t* the center time of each segment.
    """
    if y is None:
        # if y is None use x for y
        same_data = True
    else:
        # The checks for if y is x are so that we can use the same function to
        # implement the core of psd(), csd(), and spectrogram() without doing
        # extra calculations. We return the unaveraged Pxy, freqs, and t.
        same_data = y is x

    if Fs is None:
        Fs = 2
    if noverlap is None:
        noverlap = 0
    if detrend_func is None:
        detrend_func = detrend_none
    if window is None:
        window = window_hanning

    # if NFFT is set to None use the whole signal
    if NFFT is None:
        NFFT = 256

    if mode is None or mode == 'default':
        mode = 'psd'
    _api.check_in_list(
        ['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'],
        mode=mode)

    if not same_data and mode != 'psd':
        raise ValueError("x and y must be equal if mode is not 'psd'")

    # Make sure we're dealing with a numpy array. If y and x were the same
    # object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)

    if sides is None or sides == 'default':
        # Real input defaults to a one-sided spectrum.
        if np.iscomplexobj(x):
            sides = 'twosided'
        else:
            sides = 'onesided'
    _api.check_in_list(['default', 'onesided', 'twosided'], sides=sides)

    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x) < NFFT:
        n = len(x)
        x = np.resize(x, NFFT)
        x[n:] = 0

    if not same_data and len(y) < NFFT:
        n = len(y)
        y = np.resize(y, NFFT)
        y[n:] = 0

    if pad_to is None:
        pad_to = NFFT

    if mode != 'psd':
        scale_by_freq = False
    elif scale_by_freq is None:
        scale_by_freq = True

    # For real x, ignore the negative frequencies unless told otherwise
    if sides == 'twosided':
        numFreqs = pad_to
        if pad_to % 2:
            freqcenter = (pad_to - 1)//2 + 1
        else:
            freqcenter = pad_to//2
        scaling_factor = 1.
    elif sides == 'onesided':
        if pad_to % 2:
            numFreqs = (pad_to + 1)//2
        else:
            numFreqs = pad_to//2 + 1
        # Double the one-sided density to conserve total power.
        scaling_factor = 2.

    if not np.iterable(window):
        # A callable window is evaluated on a flat run of ones of length NFFT.
        window = window(np.ones(NFFT, x.dtype))
    if len(window) != NFFT:
        raise ValueError(
            "The window length must match the data's first dimension")

    # Segment (columns), detrend, window, and FFT each segment.
    result = stride_windows(x, NFFT, noverlap, axis=0)
    result = detrend(result, detrend_func, axis=0)
    result = result * window.reshape((-1, 1))  # broadcast over segments
    result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
    freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]

    if not same_data:
        # if same_data is False, mode must be 'psd'
        resultY = stride_windows(y, NFFT, noverlap)
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = resultY * window.reshape((-1, 1))
        resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
        result = np.conj(result) * resultY
    elif mode == 'psd':
        # |X|^2 as conj(X) * X.
        result = np.conj(result) * result
    elif mode == 'magnitude':
        result = np.abs(result) / np.abs(window).sum()
    elif mode == 'angle' or mode == 'phase':
        # we unwrap the phase later to handle the onesided vs. twosided case
        result = np.angle(result)
    elif mode == 'complex':
        result /= np.abs(window).sum()

    if mode == 'psd':

        # Also include scaling factors for one-sided densities and dividing by
        # the sampling frequency, if desired. Scale everything, except the DC
        # component and the NFFT/2 component:

        # if we have a even number of frequencies, don't scale NFFT/2
        if not NFFT % 2:
            slc = slice(1, -1, None)
        # if we have an odd number, just don't scale DC
        else:
            slc = slice(1, None, None)

        result[slc] *= scaling_factor

        # MATLAB divides by the sampling frequency so that density function
        # has units of dB/Hz and can be integrated by the plotted frequency
        # values. Perform the same scaling here.
        if scale_by_freq:
            result /= Fs
            # Scale the spectrum by the norm of the window to compensate for
            # windowing loss; see Bendat & Piersol Sec 11.5.2.
            result /= (np.abs(window)**2).sum()
        else:
            # In this case, preserve power in the segment, not amplitude
            result /= np.abs(window).sum()**2

    # Center time of each segment, in units of 1/Fs.
    t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs

    if sides == 'twosided':
        # center the frequency range at zero
        freqs = np.roll(freqs, -freqcenter, axis=0)
        result = np.roll(result, -freqcenter, axis=0)
    elif not pad_to % 2:
        # get the last value correctly, it is negative otherwise
        freqs[-1] *= -1

    # we unwrap the phase here to handle the onesided vs. twosided case
    if mode == 'phase':
        result = np.unwrap(result, axis=0)

    return result, freqs, t
+
+
+def _single_spectrum_helper(
+ mode, x, Fs=None, window=None, pad_to=None, sides=None):
+ """
+ Private helper implementing the commonality between the complex, magnitude,
+ angle, and phase spectrums.
+ """
+ _api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)
+
+ if pad_to is None:
+ pad_to = len(x)
+
+ spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
+ detrend_func=detrend_none, window=window,
+ noverlap=0, pad_to=pad_to,
+ sides=sides,
+ scale_by_freq=False,
+ mode=mode)
+ if mode != 'complex':
+ spec = spec.real
+
+ if spec.ndim == 2 and spec.shape[1] == 1:
+ spec = spec[:, 0]
+
+ return spec, freqs
+
+
+# Split out these keyword docs so that they can be used elsewhere
+docstring.interpd.update(
+ Spectral="""\
+Fs : float, default: 2
+ The sampling frequency (samples per time unit). It is used to calculate
+ the Fourier frequencies, *freqs*, in cycles per time unit.
+
+window : callable or ndarray, default: `.window_hanning`
+ A function or a vector of length *NFFT*. To create window vectors see
+ `.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`,
+ `numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a
+ function is passed as the argument, it must take a data segment as an
+ argument and return the windowed version of the segment.
+
+sides : {'default', 'onesided', 'twosided'}, optional
+ Which sides of the spectrum to return. 'default' is one-sided for real
+ data and two-sided for complex data. 'onesided' forces the return of a
+ one-sided spectrum, while 'twosided' forces two-sided.""",
+
+ Single_Spectrum="""\
+pad_to : int, optional
+ The number of points to which the data segment is padded when performing
+ the FFT. While not increasing the actual resolution of the spectrum (the
+ minimum distance between resolvable peaks), this can give more points in
+ the plot, allowing for more detail. This corresponds to the *n* parameter
+ in the call to fft(). The default is None, which sets *pad_to* equal to
+ the length of the input signal (i.e. no padding).""",
+
+ PSD="""\
+pad_to : int, optional
+ The number of points to which the data segment is padded when performing
+ the FFT. This can be different from *NFFT*, which specifies the number
+ of data points used. While not increasing the actual resolution of the
+ spectrum (the minimum distance between resolvable peaks), this can give
+ more points in the plot, allowing for more detail. This corresponds to
+ the *n* parameter in the call to fft(). The default is None, which sets
+    *pad_to* equal to *NFFT*.
+
+NFFT : int, default: 256
+    The number of data points used in each block for the FFT. A power of 2 is
+ most efficient. This should *NOT* be used to get zero padding, or the
+ scaling of the result will be incorrect; use *pad_to* for this instead.
+
+detrend : {'none', 'mean', 'linear'} or callable, default: 'none'
+ The function applied to each segment before fft-ing, designed to remove
+ the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter
+ is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab`
+ module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`,
+ but you can use a custom function as well. You can also use a string to
+ choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls
+ `.detrend_mean`. 'linear' calls `.detrend_linear`.
+
+scale_by_freq : bool, default: True
+ Whether the resulting density values should be scaled by the scaling
+ frequency, which gives density in units of Hz^-1. This allows for
+ integration over the returned frequency values. The default is True for
+ MATLAB compatibility.""")
+
+
+@docstring.dedent_interpd
+def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
+ r"""
+ Compute the power spectral density.
+
+ The power spectral density :math:`P_{xx}` by Welch's average
+ periodogram method. The vector *x* is divided into *NFFT* length
+ segments. Each segment is detrended by function *detrend* and
+ windowed by function *window*. *noverlap* gives the length of
+ the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
+ of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
+
+ If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
+
+ Parameters
+ ----------
+ x : 1-D array or sequence
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : int, default: 0 (no overlap)
+ The number of points of overlap between segments.
+
+ Returns
+ -------
+ Pxx : 1-D array
+ The values for the power spectrum :math:`P_{xx}` (real valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *Pxx*
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
+ Wiley & Sons (1986)
+
+ See Also
+ --------
+ specgram
+ `specgram` differs in the default overlap; in not returning the mean of
+ the segment periodograms; and in returning the times of the segments.
+
+ magnitude_spectrum : returns the magnitude spectrum.
+
+ csd : returns the spectral density between two signals.
+ """
+ Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
+ window=window, noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq)
+ return Pxx.real, freqs
+
+
+@docstring.dedent_interpd
+def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
+ """
+ Compute the cross-spectral density.
+
+ The cross spectral density :math:`P_{xy}` by Welch's average
+ periodogram method. The vectors *x* and *y* are divided into
+ *NFFT* length segments. Each segment is detrended by function
+ *detrend* and windowed by function *window*. *noverlap* gives
+ the length of the overlap between segments. The product of
+ the direct FFTs of *x* and *y* are averaged over each segment
+ to compute :math:`P_{xy}`, with a scaling to correct for power
+ loss due to windowing.
+
+ If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
+ padded to *NFFT*.
+
+ Parameters
+ ----------
+ x, y : 1-D arrays or sequences
+ Arrays or sequences containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : int, default: 0 (no overlap)
+ The number of points of overlap between segments.
+
+ Returns
+ -------
+ Pxy : 1-D array
+        The values for the cross spectrum :math:`P_{xy}` before scaling
+        (complex valued)
+
+ freqs : 1-D array
+ The frequencies corresponding to the elements in *Pxy*
+
+ References
+ ----------
+ Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
+ Wiley & Sons (1986)
+
+ See Also
+ --------
+ psd : equivalent to setting ``y = x``.
+ """
+ if NFFT is None:
+ NFFT = 256
+ Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
+ detrend_func=detrend, window=window,
+ noverlap=noverlap, pad_to=pad_to,
+ sides=sides, scale_by_freq=scale_by_freq,
+ mode='psd')
+
+ if Pxy.ndim == 2:
+ if Pxy.shape[1] > 1:
+ Pxy = Pxy.mean(axis=1)
+ else:
+ Pxy = Pxy[:, 0]
+ return Pxy, freqs
+
+
+_single_spectrum_docs = """\
+Compute the {quantity} of *x*.
+Data is padded to a length of *pad_to* and the windowing function *window* is
+applied to the signal.
+
+Parameters
+----------
+x : 1-D array or sequence
+ Array or sequence containing the data
+
+{Spectral}
+
+{Single_Spectrum}
+
+Returns
+-------
+spectrum : 1-D array
+ The {quantity}.
+freqs : 1-D array
+ The frequencies corresponding to the elements in *spectrum*.
+
+See Also
+--------
+psd
+ Returns the power spectral density.
+complex_spectrum
+ Returns the complex-valued frequency spectrum.
+magnitude_spectrum
+ Returns the absolute value of the `complex_spectrum`.
+angle_spectrum
+ Returns the angle of the `complex_spectrum`.
+phase_spectrum
+ Returns the phase (unwrapped angle) of the `complex_spectrum`.
+specgram
+ Can return the complex spectrum of segments within the signal.
+"""
+
+
+complex_spectrum = functools.partial(_single_spectrum_helper, "complex")
+complex_spectrum.__doc__ = _single_spectrum_docs.format(
+ quantity="complex-valued frequency spectrum",
+ **docstring.interpd.params)
+magnitude_spectrum = functools.partial(_single_spectrum_helper, "magnitude")
+magnitude_spectrum.__doc__ = _single_spectrum_docs.format(
+ quantity="magnitude (absolute value) of the frequency spectrum",
+ **docstring.interpd.params)
+angle_spectrum = functools.partial(_single_spectrum_helper, "angle")
+angle_spectrum.__doc__ = _single_spectrum_docs.format(
+ quantity="angle of the frequency spectrum (wrapped phase spectrum)",
+ **docstring.interpd.params)
+phase_spectrum = functools.partial(_single_spectrum_helper, "phase")
+phase_spectrum.__doc__ = _single_spectrum_docs.format(
+ quantity="phase of the frequency spectrum (unwrapped phase spectrum)",
+ **docstring.interpd.params)
+
+
+@docstring.dedent_interpd
+def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
+ noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
+ mode=None):
+ """
+ Compute a spectrogram.
+
+    Compute a spectrogram of data in x. Data are split into
+ NFFT length segments and the spectrum of each section is
+ computed. The windowing function window is applied to each
+ segment, and the amount of overlap of each segment is
+ specified with noverlap.
+
+ Parameters
+ ----------
+ x : array-like
+ 1-D array or sequence.
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : int, default: 128
+ The number of points of overlap between blocks.
+ mode : str, default: 'psd'
+ What sort of spectrum to use:
+ 'psd'
+ Returns the power spectral density.
+ 'complex'
+ Returns the complex-valued frequency spectrum.
+ 'magnitude'
+ Returns the magnitude spectrum.
+ 'angle'
+ Returns the phase spectrum without unwrapping.
+ 'phase'
+ Returns the phase spectrum with unwrapping.
+
+ Returns
+ -------
+ spectrum : array-like
+ 2D array, columns are the periodograms of successive segments.
+
+ freqs : array-like
+ 1-D array, frequencies corresponding to the rows in *spectrum*.
+
+ t : array-like
+ 1-D array, the times corresponding to midpoints of segments
+        (i.e. the columns in *spectrum*).
+
+ See Also
+ --------
+ psd : differs in the overlap and in the return values.
+ complex_spectrum : similar, but with complex valued frequencies.
+    magnitude_spectrum : similar to single segment when mode is 'magnitude'.
+ angle_spectrum : similar to single segment when mode is 'angle'.
+ phase_spectrum : similar to single segment when mode is 'phase'.
+
+ Notes
+ -----
+ detrend and scale_by_freq only apply when *mode* is set to 'psd'.
+
+ """
+ if noverlap is None:
+ noverlap = 128 # default in _spectral_helper() is noverlap = 0
+ if NFFT is None:
+ NFFT = 256 # same default as in _spectral_helper()
+ if len(x) <= NFFT:
+ _api.warn_external("Only one segment is calculated since parameter "
+ f"NFFT (={NFFT}) >= signal length (={len(x)}).")
+
+ spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
+ detrend_func=detrend, window=window,
+ noverlap=noverlap, pad_to=pad_to,
+ sides=sides,
+ scale_by_freq=scale_by_freq,
+ mode=mode)
+
+ if mode != 'complex':
+ spec = spec.real # Needed since helper implements generically
+
+ return spec, freqs, t
+
+
+@docstring.dedent_interpd
+def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
+ noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
+ r"""
+ The coherence between *x* and *y*. Coherence is the normalized
+ cross spectral density:
+
+ .. math::
+
+ C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}}
+
+ Parameters
+ ----------
+ x, y
+ Array or sequence containing the data
+
+ %(Spectral)s
+
+ %(PSD)s
+
+ noverlap : int, default: 0 (no overlap)
+ The number of points of overlap between segments.
+
+ Returns
+ -------
+ Cxy : 1-D array
+ The coherence vector.
+ freqs : 1-D array
+ The frequencies for the elements in *Cxy*.
+
+ See Also
+ --------
+ :func:`psd`, :func:`csd` :
+ For information about the methods used to compute :math:`P_{xy}`,
+ :math:`P_{xx}` and :math:`P_{yy}`.
+ """
+ if len(x) < 2 * NFFT:
+ raise ValueError(
+ "Coherence is calculated by averaging over *NFFT* length "
+ "segments. Your signal is too short for your choice of *NFFT*.")
+ Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
+ scale_by_freq)
+ Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
+ scale_by_freq)
+ Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
+ scale_by_freq)
+ Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy)
+ return Cxy, f
+
+
+class GaussianKDE:
+ """
+ Representation of a kernel-density estimate using Gaussian kernels.
+
+ Parameters
+ ----------
+ dataset : array-like
+ Datapoints to estimate from. In case of univariate data this is a 1-D
+ array, otherwise a 2D array with shape (# of dims, # of data).
+
+ bw_method : str, scalar or callable, optional
+ The method used to calculate the estimator bandwidth. This can be
+ 'scott', 'silverman', a scalar constant or a callable. If a
+ scalar, this will be used directly as `kde.factor`. If a
+ callable, it should take a `GaussianKDE` instance as only
+ parameter and return a scalar. If None (default), 'scott' is used.
+
+ Attributes
+ ----------
+ dataset : ndarray
+        The dataset with which `GaussianKDE` was initialized.
+
+ dim : int
+ Number of dimensions.
+
+ num_dp : int
+ Number of datapoints.
+
+ factor : float
+ The bandwidth factor, obtained from `kde.covariance_factor`, with which
+ the covariance matrix is multiplied.
+
+ covariance : ndarray
+ The covariance matrix of *dataset*, scaled by the calculated bandwidth
+ (`kde.factor`).
+
+ inv_cov : ndarray
+ The inverse of *covariance*.
+
+ Methods
+ -------
+ kde.evaluate(points) : ndarray
+ Evaluate the estimated pdf on a provided set of points.
+
+ kde(points) : ndarray
+ Same as kde.evaluate(points)
+
+ """
+
+ # This implementation with minor modification was too good to pass up.
+ # from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
+
+ def __init__(self, dataset, bw_method=None):
+ self.dataset = np.atleast_2d(dataset)
+ if not np.array(self.dataset).size > 1:
+ raise ValueError("`dataset` input should have multiple elements.")
+
+ self.dim, self.num_dp = np.array(self.dataset).shape
+
+ if bw_method is None:
+ pass
+ elif cbook._str_equal(bw_method, 'scott'):
+ self.covariance_factor = self.scotts_factor
+ elif cbook._str_equal(bw_method, 'silverman'):
+ self.covariance_factor = self.silverman_factor
+ elif isinstance(bw_method, Number):
+ self._bw_method = 'use constant'
+ self.covariance_factor = lambda: bw_method
+ elif callable(bw_method):
+ self._bw_method = bw_method
+ self.covariance_factor = lambda: self._bw_method(self)
+ else:
+ raise ValueError("`bw_method` should be 'scott', 'silverman', a "
+ "scalar or a callable")
+
+ # Computes the covariance matrix for each Gaussian kernel using
+ # covariance_factor().
+
+ self.factor = self.covariance_factor()
+ # Cache covariance and inverse covariance of the data
+ if not hasattr(self, '_data_inv_cov'):
+ self.data_covariance = np.atleast_2d(
+ np.cov(
+ self.dataset,
+ rowvar=1,
+ bias=False))
+ self.data_inv_cov = np.linalg.inv(self.data_covariance)
+
+ self.covariance = self.data_covariance * self.factor ** 2
+ self.inv_cov = self.data_inv_cov / self.factor ** 2
+ self.norm_factor = (np.sqrt(np.linalg.det(2 * np.pi * self.covariance))
+ * self.num_dp)
+
+ def scotts_factor(self):
+ return np.power(self.num_dp, -1. / (self.dim + 4))
+
+ def silverman_factor(self):
+ return np.power(
+ self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))
+
+ # Default method to calculate bandwidth, can be overwritten by subclass
+ covariance_factor = scotts_factor
+
+ def evaluate(self, points):
+ """
+ Evaluate the estimated pdf on a set of points.
+
+ Parameters
+ ----------
+ points : (# of dimensions, # of points)-array
+ Alternatively, a (# of dimensions,) vector can be passed in and
+ treated as a single point.
+
+ Returns
+ -------
+ (# of points,)-array
+ The values at each point.
+
+ Raises
+ ------
+ ValueError : if the dimensionality of the input points is different
+ than the dimensionality of the KDE.
+
+ """
+ points = np.atleast_2d(points)
+
+ dim, num_m = np.array(points).shape
+ if dim != self.dim:
+ raise ValueError("points have dimension {}, dataset has dimension "
+ "{}".format(dim, self.dim))
+
+ result = np.zeros(num_m)
+
+ if num_m >= self.num_dp:
+ # there are more points than data, so loop over data
+ for i in range(self.num_dp):
+ diff = self.dataset[:, i, np.newaxis] - points
+ tdiff = np.dot(self.inv_cov, diff)
+ energy = np.sum(diff * tdiff, axis=0) / 2.0
+ result = result + np.exp(-energy)
+ else:
+ # loop over points
+ for i in range(num_m):
+ diff = self.dataset - points[:, i, np.newaxis]
+ tdiff = np.dot(self.inv_cov, diff)
+ energy = np.sum(diff * tdiff, axis=0) / 2.0
+ result[i] = np.sum(np.exp(-energy), axis=0)
+
+ result = result / self.norm_factor
+
+ return result
+
+ __call__ = evaluate
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/quiver.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/quiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf518eddaebac6546390877fb8cc19562617f9dd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/quiver.py
@@ -0,0 +1,1188 @@
+"""
+Support for plotting vector fields.
+
+Presently this contains Quiver and Barb. Quiver plots an arrow in the
+direction of the vector, with the size of the arrow related to the
+magnitude of the vector.
+
+Barbs are like quiver in that they point along a vector, but
+the magnitude of the vector is given schematically by the presence of barbs
+or flags on the barb.
+
+This will also become a home for things such as standard
+deviation ellipses, which can and will be derived very easily from
+the Quiver code.
+"""
+
+import math
+
+import numpy as np
+from numpy import ma
+
+from matplotlib import _api, cbook, docstring, font_manager
+import matplotlib.artist as martist
+import matplotlib.collections as mcollections
+from matplotlib.patches import CirclePolygon
+import matplotlib.text as mtext
+import matplotlib.transforms as transforms
+
+
+_quiver_doc = """
+Plot a 2D field of arrows.
+
+Call signature::
+
+ quiver([X, Y], U, V, [C], **kw)
+
+*X*, *Y* define the arrow locations, *U*, *V* define the arrow directions, and
+*C* optionally sets the color.
+
+Each arrow is internally represented by a filled polygon with a default edge
+linewidth of 0. As a result, an arrow is rather a filled area, not a line with
+a head, and `.PolyCollection` properties like *linewidth*, *linestyle*,
+*facecolor*, etc. act accordingly.
+
+**Arrow size**
+
+The default settings auto-scales the length of the arrows to a reasonable size.
+To change this behavior see the *scale* and *scale_units* parameters.
+
+**Arrow shape**
+
+The defaults give a slightly swept-back arrow; to make the head a
+triangle, make *headaxislength* the same as *headlength*. To make the
+arrow more pointed, reduce *headwidth* or increase *headlength* and
+*headaxislength*. To make the head smaller relative to the shaft,
+scale down all the head parameters. You will probably do best to leave
+minshaft alone.
+
+**Arrow outline**
+
+*linewidths* and *edgecolors* can be used to customize the arrow
+outlines.
+
+Parameters
+----------
+X, Y : 1D or 2D array-like, optional
+ The x and y coordinates of the arrow locations.
+
+ If not given, they will be generated as a uniform integer meshgrid based
+ on the dimensions of *U* and *V*.
+
+ If *X* and *Y* are 1D but *U*, *V* are 2D, *X*, *Y* are expanded to 2D
+ using ``X, Y = np.meshgrid(X, Y)``. In this case ``len(X)`` and ``len(Y)``
+ must match the column and row dimensions of *U* and *V*.
+
+U, V : 1D or 2D array-like
+ The x and y direction components of the arrow vectors.
+
+ They must have the same number of elements, matching the number of arrow
+ locations. *U* and *V* may be masked. Only locations unmasked in
+ *U*, *V*, and *C* will be drawn.
+
+C : 1D or 2D array-like, optional
+ Numeric data that defines the arrow colors by colormapping via *norm* and
+ *cmap*.
+
+ This does not support explicit colors. If you want to set colors directly,
+ use *color* instead. The size of *C* must match the number of arrow
+ locations.
+
+units : {'width', 'height', 'dots', 'inches', 'x', 'y', 'xy'}, default: 'width'
+ The arrow dimensions (except for *length*) are measured in multiples of
+ this unit.
+
+ The following values are supported:
+
+ - 'width', 'height': The width or height of the axis.
+ - 'dots', 'inches': Pixels or inches based on the figure dpi.
+ - 'x', 'y', 'xy': *X*, *Y* or :math:`\\sqrt{X^2 + Y^2}` in data units.
+
+ The arrows scale differently depending on the units. For
+ 'x' or 'y', the arrows get larger as one zooms in; for other
+ units, the arrow size is independent of the zoom state. For
+    'width' or 'height', the arrow size increases with the width and
+ height of the axes, respectively, when the window is resized;
+ for 'dots' or 'inches', resizing does not change the arrows.
+
+angles : {'uv', 'xy'} or array-like, default: 'uv'
+ Method for determining the angle of the arrows.
+
+ - 'uv': The arrow axis aspect ratio is 1 so that
+ if *U* == *V* the orientation of the arrow on the plot is 45 degrees
+ counter-clockwise from the horizontal axis (positive to the right).
+
+ Use this if the arrows symbolize a quantity that is not based on
+ *X*, *Y* data coordinates.
+
+ - 'xy': Arrows point from (x, y) to (x+u, y+v).
+ Use this for plotting a gradient field, for example.
+
+ - Alternatively, arbitrary angles may be specified explicitly as an array
+ of values in degrees, counter-clockwise from the horizontal axis.
+
+ In this case *U*, *V* is only used to determine the length of the
+ arrows.
+
+ Note: inverting a data axis will correspondingly invert the
+ arrows only with ``angles='xy'``.
+
+scale : float, optional
+ Number of data units per arrow length unit, e.g., m/s per plot width; a
+ smaller scale parameter makes the arrow longer. Default is *None*.
+
+ If *None*, a simple autoscaling algorithm is used, based on the average
+ vector length and the number of vectors. The arrow length unit is given by
+ the *scale_units* parameter.
+
+scale_units : {'width', 'height', 'dots', 'inches', 'x', 'y', 'xy'}, optional
+ If the *scale* kwarg is *None*, the arrow length unit. Default is *None*.
+
+ e.g. *scale_units* is 'inches', *scale* is 2.0, and ``(u, v) = (1, 0)``,
+ then the vector will be 0.5 inches long.
+
+ If *scale_units* is 'width' or 'height', then the vector will be half the
+ width/height of the axes.
+
+ If *scale_units* is 'x' then the vector will be 0.5 x-axis
+ units. To plot vectors in the x-y plane, with u and v having
+ the same units as x and y, use
+ ``angles='xy', scale_units='xy', scale=1``.
+
+width : float, optional
+ Shaft width in arrow units; default depends on choice of units,
+ above, and number of vectors; a typical starting value is about
+ 0.005 times the width of the plot.
+
+headwidth : float, default: 3
+ Head width as multiple of shaft width.
+
+headlength : float, default: 5
+ Head length as multiple of shaft width.
+
+headaxislength : float, default: 4.5
+ Head length at shaft intersection.
+
+minshaft : float, default: 1
+ Length below which arrow scales, in units of head length. Do not
+ set this to less than 1, or small arrows will look terrible!
+
+minlength : float, default: 1
+ Minimum length as a multiple of shaft width; if an arrow length
+ is less than this, plot a dot (hexagon) of this diameter instead.
+
+pivot : {'tail', 'mid', 'middle', 'tip'}, default: 'tail'
+ The part of the arrow that is anchored to the *X*, *Y* grid. The arrow
+ rotates about this point.
+
+ 'mid' is a synonym for 'middle'.
+
+color : color or color sequence, optional
+ Explicit color(s) for the arrows. If *C* has been set, *color* has no
+ effect.
+
+ This is a synonym for the `.PolyCollection` *facecolor* parameter.
+
+Other Parameters
+----------------
+data : indexable object, optional
+ DATA_PARAMETER_PLACEHOLDER
+
+**kwargs : `~matplotlib.collections.PolyCollection` properties, optional
+ All other keyword arguments are passed on to `.PolyCollection`:
+
+ %(PolyCollection:kwdoc)s
+
+Returns
+-------
+`~matplotlib.quiver.Quiver`
+
+See Also
+--------
+.Axes.quiverkey : Add a key to a quiver plot.
+""" % docstring.interpd.params
+
+docstring.interpd.update(quiver_doc=_quiver_doc)
+
+
+class QuiverKey(martist.Artist):
+ """Labelled arrow for use as a quiver plot scale key."""
+ halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
+ valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
+ pivot = {'N': 'middle', 'S': 'middle', 'E': 'tip', 'W': 'tail'}
+
+ def __init__(self, Q, X, Y, U, label,
+ *, angle=0, coordinates='axes', color=None, labelsep=0.1,
+ labelpos='N', labelcolor=None, fontproperties=None,
+ **kw):
+ """
+ Add a key to a quiver plot.
+
+ The positioning of the key depends on *X*, *Y*, *coordinates*, and
+ *labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position of
+ the middle of the key arrow. If *labelpos* is 'E', *X*, *Y* positions
+ the head, and if *labelpos* is 'W', *X*, *Y* positions the tail; in
+ either of these two cases, *X*, *Y* is somewhere in the middle of the
+ arrow+label key object.
+
+ Parameters
+ ----------
+ Q : `matplotlib.quiver.Quiver`
+ A `.Quiver` object as returned by a call to `~.Axes.quiver()`.
+ X, Y : float
+ The location of the key.
+ U : float
+ The length of the key.
+ label : str
+ The key label (e.g., length and units of the key).
+ angle : float, default: 0
+ The angle of the key arrow, in degrees anti-clockwise from the
+ x-axis.
+ coordinates : {'axes', 'figure', 'data', 'inches'}, default: 'axes'
+ Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
+ normalized coordinate systems with (0, 0) in the lower left and
+ (1, 1) in the upper right; 'data' are the axes data coordinates
+ (used for the locations of the vectors in the quiver plot itself);
+ 'inches' is position in the figure in inches, with (0, 0) at the
+ lower left corner.
+ color : color
+ Overrides face and edge colors from *Q*.
+ labelpos : {'N', 'S', 'E', 'W'}
+ Position the label above, below, to the right, to the left of the
+ arrow, respectively.
+ labelsep : float, default: 0.1
+ Distance in inches between the arrow and the label.
+ labelcolor : color, default: :rc:`text.color`
+ Label color.
+ fontproperties : dict, optional
+ A dictionary with keyword arguments accepted by the
+ `~matplotlib.font_manager.FontProperties` initializer:
+ *family*, *style*, *variant*, *size*, *weight*.
+ **kwargs
+ Any additional keyword arguments are used to override vector
+ properties taken from *Q*.
+ """
+ super().__init__()
+ self.Q = Q
+ self.X = X
+ self.Y = Y
+ self.U = U
+ self.angle = angle
+ self.coord = coordinates
+ self.color = color
+ self.label = label
+ self._labelsep_inches = labelsep
+
+ self.labelpos = labelpos
+ self.labelcolor = labelcolor
+ self.fontproperties = fontproperties or dict()
+ self.kw = kw
+ _fp = self.fontproperties
+ # boxprops = dict(facecolor='red')
+ self.text = mtext.Text(
+ text=label, # bbox=boxprops,
+ horizontalalignment=self.halign[self.labelpos],
+ verticalalignment=self.valign[self.labelpos],
+ fontproperties=font_manager.FontProperties._from_any(_fp))
+
+ if self.labelcolor is not None:
+ self.text.set_color(self.labelcolor)
+ self._dpi_at_last_init = None
+ self.zorder = Q.zorder + 0.1
+
+ @property
+ def labelsep(self):
+ return self._labelsep_inches * self.Q.axes.figure.dpi
+
+ def _init(self):
+ if True: # self._dpi_at_last_init != self.axes.figure.dpi
+ if self.Q._dpi_at_last_init != self.Q.axes.figure.dpi:
+ self.Q._init()
+ self._set_transform()
+ with cbook._setattr_cm(self.Q, pivot=self.pivot[self.labelpos],
+ # Hack: save and restore the Umask
+ Umask=ma.nomask):
+ u = self.U * np.cos(np.radians(self.angle))
+ v = self.U * np.sin(np.radians(self.angle))
+ angle = (self.Q.angles if isinstance(self.Q.angles, str)
+ else 'uv')
+ self.verts = self.Q._make_verts(
+ np.array([u]), np.array([v]), angle)
+ kw = self.Q.polykw
+ kw.update(self.kw)
+ self.vector = mcollections.PolyCollection(
+ self.verts,
+ offsets=[(self.X, self.Y)],
+ transOffset=self.get_transform(),
+ **kw)
+ if self.color is not None:
+ self.vector.set_color(self.color)
+ self.vector.set_transform(self.Q.get_transform())
+ self.vector.set_figure(self.get_figure())
+ self._dpi_at_last_init = self.Q.axes.figure.dpi
+
+ def _text_x(self, x):
+ if self.labelpos == 'E':
+ return x + self.labelsep
+ elif self.labelpos == 'W':
+ return x - self.labelsep
+ else:
+ return x
+
+ def _text_y(self, y):
+ if self.labelpos == 'N':
+ return y + self.labelsep
+ elif self.labelpos == 'S':
+ return y - self.labelsep
+ else:
+ return y
+
+ @martist.allow_rasterization
+ def draw(self, renderer):
+ self._init()
+ self.vector.draw(renderer)
+ x, y = self.get_transform().transform((self.X, self.Y))
+ self.text.set_x(self._text_x(x))
+ self.text.set_y(self._text_y(y))
+ self.text.draw(renderer)
+ self.stale = False
+
+ def _set_transform(self):
+ self.set_transform(_api.check_getitem({
+ "data": self.Q.axes.transData,
+ "axes": self.Q.axes.transAxes,
+ "figure": self.Q.axes.figure.transFigure,
+ "inches": self.Q.axes.figure.dpi_scale_trans,
+ }, coordinates=self.coord))
+
+ def set_figure(self, fig):
+ super().set_figure(fig)
+ self.text.set_figure(fig)
+
+ def contains(self, mouseevent):
+ inside, info = self._default_contains(mouseevent)
+ if inside is not None:
+ return inside, info
+ # Maybe the dictionary should allow one to
+ # distinguish between a text hit and a vector hit.
+ if (self.text.contains(mouseevent)[0] or
+ self.vector.contains(mouseevent)[0]):
+ return True, {}
+ return False, {}
+
+
+def _parse_args(*args, caller_name='function'):
+ """
+ Helper function to parse positional parameters for colored vector plots.
+
+ This is currently used for Quiver and Barbs.
+
+ Parameters
+ ----------
+ *args : list
+ list of 2-5 arguments. Depending on their number they are parsed to::
+
+ U, V
+ U, V, C
+ X, Y, U, V
+ X, Y, U, V, C
+
+ caller_name : str
+ Name of the calling method (used in error messages).
+ """
+ X = Y = C = None
+
+ len_args = len(args)
+ if len_args == 2:
+ # The use of atleast_1d allows for handling scalar arguments while also
+ # keeping masked arrays
+ U, V = np.atleast_1d(*args)
+ elif len_args == 3:
+ U, V, C = np.atleast_1d(*args)
+ elif len_args == 4:
+ X, Y, U, V = np.atleast_1d(*args)
+ elif len_args == 5:
+ X, Y, U, V, C = np.atleast_1d(*args)
+ else:
+ raise TypeError(f'{caller_name} takes 2-5 positional arguments but '
+ f'{len_args} were given')
+
+ nr, nc = (1, U.shape[0]) if U.ndim == 1 else U.shape
+
+ if X is not None:
+ X = X.ravel()
+ Y = Y.ravel()
+ if len(X) == nc and len(Y) == nr:
+ X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
+ elif len(X) != len(Y):
+ raise ValueError('X and Y must be the same size, but '
+ f'X.size is {X.size} and Y.size is {Y.size}.')
+ else:
+ indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
+ X, Y = [np.ravel(a) for a in indexgrid]
+ # Size validation for U, V, C is left to the set_UVC method.
+ return X, Y, U, V, C
+
+
+def _check_consistent_shapes(*arrays):
+ all_shapes = {a.shape for a in arrays}
+ if len(all_shapes) != 1:
+ raise ValueError('The shapes of the passed in arrays do not match')
+
+
+class Quiver(mcollections.PolyCollection):
+ """
+ Specialized PolyCollection for arrows.
+
+ The only API method is set_UVC(), which can be used
+ to change the size, orientation, and color of the
+ arrows; their locations are fixed when the class is
+ instantiated. Possibly this method will be useful
+ in animations.
+
+ Much of the work in this class is done in the draw()
+ method so that as much information as possible is available
+ about the plot. In subsequent draw() calls, recalculation
+ is limited to things that might have changed, so there
+ should be no performance penalty from putting the calculations
+ in the draw() method.
+ """
+
+ _PIVOT_VALS = ('tail', 'middle', 'tip')
+
+ @docstring.Substitution(_quiver_doc)
+ def __init__(self, ax, *args,
+ scale=None, headwidth=3, headlength=5, headaxislength=4.5,
+ minshaft=1, minlength=1, units='width', scale_units=None,
+ angles='uv', width=None, color='k', pivot='tail', **kw):
+ """
+ The constructor takes one required argument, an Axes
+ instance, followed by the args and kwargs described
+ by the following pyplot interface documentation:
+ %s
+ """
+ self._axes = ax # The attr actually set by the Artist.axes property.
+ X, Y, U, V, C = _parse_args(*args, caller_name='quiver()')
+ self.X = X
+ self.Y = Y
+ self.XY = np.column_stack((X, Y))
+ self.N = len(X)
+ self.scale = scale
+ self.headwidth = headwidth
+ self.headlength = float(headlength)
+ self.headaxislength = headaxislength
+ self.minshaft = minshaft
+ self.minlength = minlength
+ self.units = units
+ self.scale_units = scale_units
+ self.angles = angles
+ self.width = width
+
+ if pivot.lower() == 'mid':
+ pivot = 'middle'
+ self.pivot = pivot.lower()
+ _api.check_in_list(self._PIVOT_VALS, pivot=self.pivot)
+
+ self.transform = kw.pop('transform', ax.transData)
+ kw.setdefault('facecolors', color)
+ kw.setdefault('linewidths', (0,))
+ super().__init__([], offsets=self.XY, transOffset=self.transform,
+ closed=False, **kw)
+ self.polykw = kw
+ self.set_UVC(U, V, C)
+ self._dpi_at_last_init = None
+
+ def _init(self):
+ """
+ Initialization delayed until first draw;
+ allow time for axes setup.
+ """
+ # It seems that there are not enough event notifications
+ # available to have this work on an as-needed basis at present.
+ if True: # self._dpi_at_last_init != self.axes.figure.dpi
+ trans = self._set_transform()
+ self.span = trans.inverted().transform_bbox(self.axes.bbox).width
+ if self.width is None:
+ sn = np.clip(math.sqrt(self.N), 8, 25)
+ self.width = 0.06 * self.span / sn
+
+ # _make_verts sets self.scale if not already specified
+ if (self._dpi_at_last_init != self.axes.figure.dpi
+ and self.scale is None):
+ self._make_verts(self.U, self.V, self.angles)
+
+ self._dpi_at_last_init = self.axes.figure.dpi
+
+ def get_datalim(self, transData):
+ trans = self.get_transform()
+ transOffset = self.get_offset_transform()
+ full_transform = (trans - transData) + (transOffset - transData)
+ XY = full_transform.transform(self.XY)
+ bbox = transforms.Bbox.null()
+ bbox.update_from_data_xy(XY, ignore=True)
+ return bbox
+
+ @martist.allow_rasterization
+ def draw(self, renderer):
+ self._init()
+ verts = self._make_verts(self.U, self.V, self.angles)
+ self.set_verts(verts, closed=False)
+ super().draw(renderer)
+ self.stale = False
+
+ def set_UVC(self, U, V, C=None):
+ # We need to ensure we have a copy, not a reference
+ # to an array that might change before draw().
+ U = ma.masked_invalid(U, copy=True).ravel()
+ V = ma.masked_invalid(V, copy=True).ravel()
+ if C is not None:
+ C = ma.masked_invalid(C, copy=True).ravel()
+ for name, var in zip(('U', 'V', 'C'), (U, V, C)):
+ if not (var is None or var.size == self.N or var.size == 1):
+ raise ValueError(f'Argument {name} has a size {var.size}'
+ f' which does not match {self.N},'
+ ' the number of arrow positions')
+
+ mask = ma.mask_or(U.mask, V.mask, copy=False, shrink=True)
+ if C is not None:
+ mask = ma.mask_or(mask, C.mask, copy=False, shrink=True)
+ if mask is ma.nomask:
+ C = C.filled()
+ else:
+ C = ma.array(C, mask=mask, copy=False)
+ self.U = U.filled(1)
+ self.V = V.filled(1)
+ self.Umask = mask
+ if C is not None:
+ self.set_array(C)
+ self.stale = True
+
+ def _dots_per_unit(self, units):
+ """
+ Return a scale factor for converting from units to pixels
+ """
+ if units in ('x', 'y', 'xy'):
+ if units == 'x':
+ dx0 = self.axes.viewLim.width
+ dx1 = self.axes.bbox.width
+ elif units == 'y':
+ dx0 = self.axes.viewLim.height
+ dx1 = self.axes.bbox.height
+ else: # 'xy' is assumed
+ dxx0 = self.axes.viewLim.width
+ dxx1 = self.axes.bbox.width
+ dyy0 = self.axes.viewLim.height
+ dyy1 = self.axes.bbox.height
+ dx1 = np.hypot(dxx1, dyy1)
+ dx0 = np.hypot(dxx0, dyy0)
+ dx = dx1 / dx0
+ else:
+ if units == 'width':
+ dx = self.axes.bbox.width
+ elif units == 'height':
+ dx = self.axes.bbox.height
+ elif units == 'dots':
+ dx = 1.0
+ elif units == 'inches':
+ dx = self.axes.figure.dpi
+ else:
+ raise ValueError('unrecognized units')
+ return dx
+
+ def _set_transform(self):
+ """
+ Set the PolyCollection transform to go
+ from arrow width units to pixels.
+ """
+ dx = self._dots_per_unit(self.units)
+ self._trans_scale = dx # pixels per arrow width unit
+ trans = transforms.Affine2D().scale(dx)
+ self.set_transform(trans)
+ return trans
+
+ def _angles_lengths(self, U, V, eps=1):
+ xy = self.axes.transData.transform(self.XY)
+ uv = np.column_stack((U, V))
+ xyp = self.axes.transData.transform(self.XY + eps * uv)
+ dxy = xyp - xy
+ angles = np.arctan2(dxy[:, 1], dxy[:, 0])
+ lengths = np.hypot(*dxy.T) / eps
+ return angles, lengths
+
+ def _make_verts(self, U, V, angles):
+ uv = (U + V * 1j)
+ str_angles = angles if isinstance(angles, str) else ''
+ if str_angles == 'xy' and self.scale_units == 'xy':
+ # Here eps is 1 so that if we get U, V by diffing
+ # the X, Y arrays, the vectors will connect the
+ # points, regardless of the axis scaling (including log).
+ angles, lengths = self._angles_lengths(U, V, eps=1)
+ elif str_angles == 'xy' or self.scale_units == 'xy':
+ # Calculate eps based on the extents of the plot
+ # so that we don't end up with roundoff error from
+ # adding a small number to a large.
+ eps = np.abs(self.axes.dataLim.extents).max() * 0.001
+ angles, lengths = self._angles_lengths(U, V, eps=eps)
+ if str_angles and self.scale_units == 'xy':
+ a = lengths
+ else:
+ a = np.abs(uv)
+ if self.scale is None:
+ sn = max(10, math.sqrt(self.N))
+ if self.Umask is not ma.nomask:
+ amean = a[~self.Umask].mean()
+ else:
+ amean = a.mean()
+ # crude auto-scaling
+ # scale is typical arrow length as a multiple of the arrow width
+ scale = 1.8 * amean * sn / self.span
+ if self.scale_units is None:
+ if self.scale is None:
+ self.scale = scale
+ widthu_per_lenu = 1.0
+ else:
+ if self.scale_units == 'xy':
+ dx = 1
+ else:
+ dx = self._dots_per_unit(self.scale_units)
+ widthu_per_lenu = dx / self._trans_scale
+ if self.scale is None:
+ self.scale = scale * widthu_per_lenu
+ length = a * (widthu_per_lenu / (self.scale * self.width))
+ X, Y = self._h_arrows(length)
+ if str_angles == 'xy':
+ theta = angles
+ elif str_angles == 'uv':
+ theta = np.angle(uv)
+ else:
+ theta = ma.masked_invalid(np.deg2rad(angles)).filled(0)
+ theta = theta.reshape((-1, 1)) # for broadcasting
+ xy = (X + Y * 1j) * np.exp(1j * theta) * self.width
+ XY = np.stack((xy.real, xy.imag), axis=2)
+ if self.Umask is not ma.nomask:
+ XY = ma.array(XY)
+ XY[self.Umask] = ma.masked
+ # This might be handled more efficiently with nans, given
+ # that nans will end up in the paths anyway.
+
+ return XY
+
+ def _h_arrows(self, length):
+ """Length is in arrow width units."""
+ # It might be possible to streamline the code
+ # and speed it up a bit by using complex (x, y)
+ # instead of separate arrays; but any gain would be slight.
+ minsh = self.minshaft * self.headlength
+ N = len(length)
+ length = length.reshape(N, 1)
+ # This number is chosen based on when pixel values overflow in Agg
+ # causing rendering errors
+ # length = np.minimum(length, 2 ** 16)
+ np.clip(length, 0, 2 ** 16, out=length)
+ # x, y: normal horizontal arrow
+ x = np.array([0, -self.headaxislength,
+ -self.headlength, 0],
+ np.float64)
+ x = x + np.array([0, 1, 1, 1]) * length
+ y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
+ y = np.repeat(y[np.newaxis, :], N, axis=0)
+ # x0, y0: arrow without shaft, for short vectors
+ x0 = np.array([0, minsh - self.headaxislength,
+ minsh - self.headlength, minsh], np.float64)
+ y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
+ ii = [0, 1, 2, 3, 2, 1, 0, 0]
+ X = x[:, ii]
+ Y = y[:, ii]
+ Y[:, 3:-1] *= -1
+ X0 = x0[ii]
+ Y0 = y0[ii]
+ Y0[3:-1] *= -1
+ shrink = length / minsh if minsh != 0. else 0.
+ X0 = shrink * X0[np.newaxis, :]
+ Y0 = shrink * Y0[np.newaxis, :]
+ short = np.repeat(length < minsh, 8, axis=1)
+ # Now select X0, Y0 if short, otherwise X, Y
+ np.copyto(X, X0, where=short)
+ np.copyto(Y, Y0, where=short)
+ if self.pivot == 'middle':
+ X -= 0.5 * X[:, 3, np.newaxis]
+ elif self.pivot == 'tip':
+ # numpy bug? using -= does not work here unless we multiply by a
+ # float first, as with 'mid'.
+ X = X - X[:, 3, np.newaxis]
+ elif self.pivot != 'tail':
+ _api.check_in_list(["middle", "tip", "tail"], pivot=self.pivot)
+
+ tooshort = length < self.minlength
+ if tooshort.any():
+ # Use a heptagonal dot:
+ th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
+ x1 = np.cos(th) * self.minlength * 0.5
+ y1 = np.sin(th) * self.minlength * 0.5
+ X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
+ Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
+ tooshort = np.repeat(tooshort, 8, 1)
+ np.copyto(X, X1, where=tooshort)
+ np.copyto(Y, Y1, where=tooshort)
+ # Mask handling is deferred to the caller, _make_verts.
+ return X, Y
+
+ quiver_doc = _quiver_doc
+
+
+_barbs_doc = r"""
+Plot a 2D field of barbs.
+
+Call signature::
+
+ barbs([X, Y], U, V, [C], **kw)
+
+Where *X*, *Y* define the barb locations, *U*, *V* define the barb
+directions, and *C* optionally sets the color.
+
+All arguments may be 1D or 2D. *U*, *V*, *C* may be masked arrays, but masked
+*X*, *Y* are not supported at present.
+
+Barbs are traditionally used in meteorology as a way to plot the speed
+and direction of wind observations, but can technically be used to
+plot any two dimensional vector quantity. As opposed to arrows, which
+give vector magnitude by the length of the arrow, the barbs give more
+quantitative information about the vector magnitude by putting slanted
+lines or a triangle for various increments in magnitude, as shown
+schematically below::
+
+ : /\ \
+ : / \ \
+ : / \ \ \
+ : / \ \ \
+ : ------------------------------
+
+The largest increment is given by a triangle (or "flag"). After those
+come full lines (barbs). The smallest increment is a half line. There
+is only, of course, ever at most 1 half line. If the magnitude is
+small and only needs a single half-line and no full lines or
+triangles, the half-line is offset from the end of the barb so that it
+can be easily distinguished from barbs with a single full line. The
+magnitude for the barb shown above would nominally be 65, using the
+standard increments of 50, 10, and 5.
+
+See also https://en.wikipedia.org/wiki/Wind_barb.
+
+Parameters
+----------
+X, Y : 1D or 2D array-like, optional
+ The x and y coordinates of the barb locations. See *pivot* for how the
+ barbs are drawn to the x, y positions.
+
+ If not given, they will be generated as a uniform integer meshgrid based
+ on the dimensions of *U* and *V*.
+
+ If *X* and *Y* are 1D but *U*, *V* are 2D, *X*, *Y* are expanded to 2D
+ using ``X, Y = np.meshgrid(X, Y)``. In this case ``len(X)`` and ``len(Y)``
+ must match the column and row dimensions of *U* and *V*.
+
+U, V : 1D or 2D array-like
+ The x and y components of the barb shaft.
+
+C : 1D or 2D array-like, optional
+ Numeric data that defines the barb colors by colormapping via *norm* and
+ *cmap*.
+
+ This does not support explicit colors. If you want to set colors directly,
+ use *barbcolor* instead.
+
+length : float, default: 7
+ Length of the barb in points; the other parts of the barb
+ are scaled against this.
+
+pivot : {'tip', 'middle'} or float, default: 'tip'
+ The part of the arrow that is anchored to the *X*, *Y* grid. The barb
+ rotates about this point. This can also be a number, which shifts the
+ start of the barb that many points away from grid point.
+
+barbcolor : color or color sequence
+ The color of all parts of the barb except for the flags. This parameter
+ is analogous to the *edgecolor* parameter for polygons, which can be used
+ instead. However this parameter will override facecolor.
+
+flagcolor : color or color sequence
+ The color of any flags on the barb. This parameter is analogous to the
+ *facecolor* parameter for polygons, which can be used instead. However,
+ this parameter will override facecolor. If this is not set (and *C* has
+ not either) then *flagcolor* will be set to match *barbcolor* so that the
+ barb has a uniform color. If *C* has been set, *flagcolor* has no effect.
+
+sizes : dict, optional
+ A dictionary of coefficients specifying the ratio of a given
+ feature to the length of the barb. Only those values one wishes to
+ override need to be included. These features include:
+
+ - 'spacing' - space between features (flags, full/half barbs)
+ - 'height' - height (distance from shaft to top) of a flag or full barb
+ - 'width' - width of a flag, twice the width of a full barb
+ - 'emptybarb' - radius of the circle used for low magnitudes
+
+fill_empty : bool, default: False
+ Whether the empty barbs (circles) that are drawn should be filled with
+ the flag color. If they are not filled, the center is transparent.
+
+rounding : bool, default: True
+ Whether the vector magnitude should be rounded when allocating barb
+ components. If True, the magnitude is rounded to the nearest multiple
+ of the half-barb increment. If False, the magnitude is simply truncated
+ to the next lowest multiple.
+
+barb_increments : dict, optional
+ A dictionary of increments specifying values to associate with
+ different parts of the barb. Only those values one wishes to
+ override need to be included.
+
+ - 'half' - half barbs (Default is 5)
+ - 'full' - full barbs (Default is 10)
+ - 'flag' - flags (default is 50)
+
+flip_barb : bool or array-like of bool, default: False
+ Whether the lines and flags should point opposite to normal.
+ Normal behavior is for the barbs and lines to point right (comes from wind
+ barbs having these features point towards low pressure in the Northern
+ Hemisphere).
+
+ A single value is applied to all barbs. Individual barbs can be flipped by
+ passing a bool array of the same size as *U* and *V*.
+
+Returns
+-------
+barbs : `~matplotlib.quiver.Barbs`
+
+Other Parameters
+----------------
+data : indexable object, optional
+ DATA_PARAMETER_PLACEHOLDER
+
+**kwargs
+ The barbs can further be customized using `.PolyCollection` keyword
+ arguments:
+
+ %(PolyCollection:kwdoc)s
+""" % docstring.interpd.params
+
+docstring.interpd.update(barbs_doc=_barbs_doc)
+
+
+class Barbs(mcollections.PolyCollection):
+ """
+ Specialized PolyCollection for barbs.
+
+ The only API method is :meth:`set_UVC`, which can be used to
+ change the size, orientation, and color of the arrows. Locations
+ are changed using the :meth:`set_offsets` collection method.
+ Possibly this method will be useful in animations.
+
+ There is one internal function :meth:`_find_tails` which finds
+ exactly what should be put on the barb given the vector magnitude.
+ From there :meth:`_make_barbs` is used to find the vertices of the
+ polygon to represent the barb based on this information.
+ """
+ # This may be an abuse of polygons here to render what is essentially maybe
+ # 1 triangle and a series of lines. It works fine as far as I can tell
+ # however.
+ @docstring.interpd
+ def __init__(self, ax, *args,
+ pivot='tip', length=7, barbcolor=None, flagcolor=None,
+ sizes=None, fill_empty=False, barb_increments=None,
+ rounding=True, flip_barb=False, **kw):
+ """
+ The constructor takes one required argument, an Axes
+ instance, followed by the args and kwargs described
+ by the following pyplot interface documentation:
+ %(barbs_doc)s
+ """
+ self.sizes = sizes or dict()
+ self.fill_empty = fill_empty
+ self.barb_increments = barb_increments or dict()
+ self.rounding = rounding
+ self.flip = np.atleast_1d(flip_barb)
+ transform = kw.pop('transform', ax.transData)
+ self._pivot = pivot
+ self._length = length
+ barbcolor = barbcolor
+ flagcolor = flagcolor
+
+ # Flagcolor and barbcolor provide convenience parameters for
+ # setting the facecolor and edgecolor, respectively, of the barb
+ # polygon. We also work here to make the flag the same color as the
+ # rest of the barb by default
+
+ if None in (barbcolor, flagcolor):
+ kw['edgecolors'] = 'face'
+ if flagcolor:
+ kw['facecolors'] = flagcolor
+ elif barbcolor:
+ kw['facecolors'] = barbcolor
+ else:
+ # Set to facecolor passed in or default to black
+ kw.setdefault('facecolors', 'k')
+ else:
+ kw['edgecolors'] = barbcolor
+ kw['facecolors'] = flagcolor
+
+ # Explicitly set a line width if we're not given one, otherwise
+ # polygons are not outlined and we get no barbs
+ if 'linewidth' not in kw and 'lw' not in kw:
+ kw['linewidth'] = 1
+
+ # Parse out the data arrays from the various configurations supported
+ x, y, u, v, c = _parse_args(*args, caller_name='barbs()')
+ self.x = x
+ self.y = y
+ xy = np.column_stack((x, y))
+
+ # Make a collection
+ barb_size = self._length ** 2 / 4 # Empirically determined
+ super().__init__([], (barb_size,), offsets=xy, transOffset=transform,
+ **kw)
+ self.set_transform(transforms.IdentityTransform())
+
+ self.set_UVC(u, v, c)
+
+ def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
+ """
+ Find how many of each of the tail pieces is necessary. Flag
+ specifies the increment for a flag, full for a full barb, and half for
+ half a barb. Mag should be the magnitude of a vector (i.e., >= 0).
+
+ This returns a tuple of:
+
+ (*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
+
+ The bool *half_flag* indicates whether half of a barb is needed,
+ since there should only ever be one half on a given
+ barb. *empty_flag* is an array of flags to easily tell if
+ a barb is empty (too low to plot any barbs/flags).
+ """
+
+ # If rounding, round to the nearest multiple of half, the smallest
+ # increment
+ if rounding:
+ mag = half * (mag / half + 0.5).astype(int)
+
+ num_flags = np.floor(mag / flag).astype(int)
+ mag = mag % flag
+
+ num_barb = np.floor(mag / full).astype(int)
+ mag = mag % full
+
+ half_flag = mag >= half
+ empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
+
+ return num_flags, num_barb, half_flag, empty_flag
+
+ def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
+ pivot, sizes, fill_empty, flip):
+ """
+ Create the wind barbs.
+
+ Parameters
+ ----------
+ u, v
+ Components of the vector in the x and y directions, respectively.
+
+ nflags, nbarbs, half_barb, empty_flag
+ Respectively, the number of flags, number of barbs, flag for
+ half a barb, and flag for empty barb, ostensibly obtained from
+ :meth:`_find_tails`.
+
+ length
+ The length of the barb staff in points.
+
+ pivot : {"tip", "middle"} or number
+ The point on the barb around which the entire barb should be
+ rotated. If a number, the start of the barb is shifted by that
+ many points from the origin.
+
+ sizes : dict
+ Coefficients specifying the ratio of a given feature to the length
+ of the barb. These features include:
+
+ - *spacing*: space between features (flags, full/half barbs).
+ - *height*: distance from shaft to top of a flag or full barb.
+ - *width*: width of a flag, twice the width of a full barb.
+ - *emptybarb*: radius of the circle used for low magnitudes.
+
+ fill_empty : bool
+ Whether the circle representing an empty barb should be filled or
+ not (this changes the drawing of the polygon).
+
+ flip : list of bool
+ Whether the features should be flipped to the other side of the
+ barb (useful for winds in the southern hemisphere).
+
+ Returns
+ -------
+ list of arrays of vertices
+ Polygon vertices for each of the wind barbs. These polygons have
+ been rotated to properly align with the vector direction.
+ """
+
+ # These control the spacing and size of barb elements relative to the
+ # length of the shaft
+ spacing = length * sizes.get('spacing', 0.125)
+ full_height = length * sizes.get('height', 0.4)
+ full_width = length * sizes.get('width', 0.25)
+ empty_rad = length * sizes.get('emptybarb', 0.15)
+
+ # Controls y point where to pivot the barb.
+ pivot_points = dict(tip=0.0, middle=-length / 2.)
+
+ endx = 0.0
+ try:
+ endy = float(pivot)
+ except ValueError:
+ endy = pivot_points[pivot.lower()]
+
+ # Get the appropriate angle for the vector components. The offset is
+ # due to the way the barb is initially drawn, going down the y-axis.
+ # This makes sense in a meteorological mode of thinking since there 0
+ # degrees corresponds to north (the y-axis traditionally)
+ angles = -(ma.arctan2(v, u) + np.pi / 2)
+
+ # Used for low magnitude. We just get the vertices, so if we make it
+ # out here, it can be reused. The center set here should put the
+ # center of the circle at the location(offset), rather than at the
+ # same point as the barb pivot; this seems more sensible.
+ circ = CirclePolygon((0, 0), radius=empty_rad).get_verts()
+ if fill_empty:
+ empty_barb = circ
+ else:
+ # If we don't want the empty one filled, we make a degenerate
+ # polygon that wraps back over itself
+ empty_barb = np.concatenate((circ, circ[::-1]))
+
+ barb_list = []
+ for index, angle in np.ndenumerate(angles):
+ # If the vector magnitude is too weak to draw anything, plot an
+ # empty circle instead
+ if empty_flag[index]:
+ # We can skip the transform since the circle has no preferred
+ # orientation
+ barb_list.append(empty_barb)
+ continue
+
+ poly_verts = [(endx, endy)]
+ offset = length
+
+ # Handle if this barb should be flipped
+ barb_height = -full_height if flip[index] else full_height
+
+ # Add vertices for each flag
+ for i in range(nflags[index]):
+ # The spacing that works for the barbs is a little too much for
+ # the flags, but this only occurs when we have more than 1
+ # flag.
+ if offset != length:
+ offset += spacing / 2.
+ poly_verts.extend(
+ [[endx, endy + offset],
+ [endx + barb_height, endy - full_width / 2 + offset],
+ [endx, endy - full_width + offset]])
+
+ offset -= full_width + spacing
+
+ # Add vertices for each barb. These really are lines, but works
+ # great adding 3 vertices that basically pull the polygon out and
+ # back down the line
+ for i in range(nbarbs[index]):
+ poly_verts.extend(
+ [(endx, endy + offset),
+ (endx + barb_height, endy + offset + full_width / 2),
+ (endx, endy + offset)])
+
+ offset -= spacing
+
+ # Add the vertices for half a barb, if needed
+ if half_barb[index]:
+ # If the half barb is the first on the staff, traditionally it
+ # is offset from the end to make it easy to distinguish from a
+ # barb with a full one
+ if offset == length:
+ poly_verts.append((endx, endy + offset))
+ offset -= 1.5 * spacing
+ poly_verts.extend(
+ [(endx, endy + offset),
+ (endx + barb_height / 2, endy + offset + full_width / 4),
+ (endx, endy + offset)])
+
+ # Rotate the barb according the angle. Making the barb first and
+ # then rotating it made the math for drawing the barb really easy.
+ # Also, the transform framework makes doing the rotation simple.
+ poly_verts = transforms.Affine2D().rotate(-angle).transform(
+ poly_verts)
+ barb_list.append(poly_verts)
+
+ return barb_list
+
+ def set_UVC(self, U, V, C=None):
+ # We need to ensure we have a copy, not a reference to an array that
+ # might change before draw().
+ self.u = ma.masked_invalid(U, copy=True).ravel()
+ self.v = ma.masked_invalid(V, copy=True).ravel()
+
+ # Flip needs to have the same number of entries as everything else.
+ # Use broadcast_to to avoid a bloated array of identical values.
+ # (can't rely on actual broadcasting)
+ if len(self.flip) == 1:
+ flip = np.broadcast_to(self.flip, self.u.shape)
+ else:
+ flip = self.flip
+
+ if C is not None:
+ c = ma.masked_invalid(C, copy=True).ravel()
+ x, y, u, v, c, flip = cbook.delete_masked_points(
+ self.x.ravel(), self.y.ravel(), self.u, self.v, c,
+ flip.ravel())
+ _check_consistent_shapes(x, y, u, v, c, flip)
+ else:
+ x, y, u, v, flip = cbook.delete_masked_points(
+ self.x.ravel(), self.y.ravel(), self.u, self.v, flip.ravel())
+ _check_consistent_shapes(x, y, u, v, flip)
+
+ magnitude = np.hypot(u, v)
+ flags, barbs, halves, empty = self._find_tails(magnitude,
+ self.rounding,
+ **self.barb_increments)
+
+ # Get the vertices for each of the barbs
+
+ plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
+ self._length, self._pivot, self.sizes,
+ self.fill_empty, flip)
+ self.set_verts(plot_barbs)
+
+ # Set the color array
+ if C is not None:
+ self.set_array(c)
+
+ # Update the offsets in case the masked data changed
+ xy = np.column_stack((x, y))
+ self._offsets = xy
+ self.stale = True
+
+ def set_offsets(self, xy):
+ """
+ Set the offsets for the barb polygons. This saves the offsets passed
+ in and masks them as appropriate for the existing U/V data.
+
+ Parameters
+ ----------
+ xy : sequence of pairs of floats
+ """
+ self.x = xy[:, 0]
+ self.y = xy[:, 1]
+ x, y, u, v = cbook.delete_masked_points(
+ self.x.ravel(), self.y.ravel(), self.u, self.v)
+ _check_consistent_shapes(x, y, u, v)
+ xy = np.column_stack((x, y))
+ super().set_offsets(xy)
+ self.stale = True
+
+ barbs_doc = _barbs_doc
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/stackplot.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/stackplot.py
new file mode 100644
index 0000000000000000000000000000000000000000..c580043eebbcd1bace132a1718dbfa8e82c853ce
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/stackplot.py
@@ -0,0 +1,124 @@
+"""
+Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
+answer:
+https://stackoverflow.com/q/2225995/
+
+(https://stackoverflow.com/users/66549/doug)
+"""
+
+import numpy as np
+
+from matplotlib import _api
+
+__all__ = ['stackplot']
+
+
+def stackplot(axes, x, *args,
+ labels=(), colors=None, baseline='zero',
+ **kwargs):
+ """
+ Draw a stacked area plot.
+
+ Parameters
+ ----------
+ x : (N,) array-like
+
+ y : (M, N) array-like
+ The data is assumed to be unstacked. Each of the following
+ calls is legal::
+
+ stackplot(x, y) # where y has shape (M, N)
+ stackplot(x, y1, y2, y3) # where y1, y2, y3 have length N
+
+ baseline : {'zero', 'sym', 'wiggle', 'weighted_wiggle'}
+ Method used to calculate the baseline:
+
+ - ``'zero'``: Constant zero baseline, i.e. a simple stacked plot.
+ - ``'sym'``: Symmetric around zero and is sometimes called
+ 'ThemeRiver'.
+ - ``'wiggle'``: Minimizes the sum of the squared slopes.
+ - ``'weighted_wiggle'``: Does the same but weights to account for
+ size of each layer. It is also called 'Streamgraph'-layout. More
+ details can be found at http://leebyron.com/streamgraph/.
+
+ labels : list of str, optional
+ A sequence of labels to assign to each data series. If unspecified,
+ then no labels will be applied to artists.
+
+ colors : list of color, optional
+ A sequence of colors to be cycled through and used to color the stacked
+ areas. The sequence need not be exactly the same length as the number
+ of provided *y*, in which case the colors will repeat from the
+ beginning.
+
+ If not specified, the colors from the Axes property cycle will be used.
+
+ data : indexable object, optional
+ DATA_PARAMETER_PLACEHOLDER
+
+ **kwargs
+ All other keyword arguments are passed to `.Axes.fill_between`.
+
+ Returns
+ -------
+ list of `.PolyCollection`
+ A list of `.PolyCollection` instances, one for each element in the
+ stacked area plot.
+ """
+
+ y = np.row_stack(args)
+
+ labels = iter(labels)
+ if colors is not None:
+ axes.set_prop_cycle(color=colors)
+
+ # Assume data passed has not been 'stacked', so stack it here.
+ # We'll need a float buffer for the upcoming calculations.
+ stack = np.cumsum(y, axis=0, dtype=np.promote_types(y.dtype, np.float32))
+
+ _api.check_in_list(['zero', 'sym', 'wiggle', 'weighted_wiggle'],
+ baseline=baseline)
+ if baseline == 'zero':
+ first_line = 0.
+
+ elif baseline == 'sym':
+ first_line = -np.sum(y, 0) * 0.5
+ stack += first_line[None, :]
+
+ elif baseline == 'wiggle':
+ m = y.shape[0]
+ first_line = (y * (m - 0.5 - np.arange(m)[:, None])).sum(0)
+ first_line /= -m
+ stack += first_line
+
+ elif baseline == 'weighted_wiggle':
+ total = np.sum(y, 0)
+ # multiply by 1/total (or zero) to avoid infinities in the division:
+ inv_total = np.zeros_like(total)
+ mask = total > 0
+ inv_total[mask] = 1.0 / total[mask]
+ increase = np.hstack((y[:, 0:1], np.diff(y)))
+ below_size = total - stack
+ below_size += 0.5 * y
+ move_up = below_size * inv_total
+ move_up[:, 0] = 0.5
+ center = (move_up - 0.5) * increase
+ center = np.cumsum(center.sum(0))
+ first_line = center - 0.5 * total
+ stack += first_line
+
+ # Color between x = 0 and the first array.
+ color = axes._get_lines.get_next_color()
+ coll = axes.fill_between(x, first_line, stack[0, :],
+ facecolor=color, label=next(labels, None),
+ **kwargs)
+ coll.sticky_edges.y[:] = [0]
+ r = [coll]
+
+ # Color between array i-1 and array i
+ for i in range(len(y) - 1):
+ color = axes._get_lines.get_next_color()
+ r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
+ facecolor=color, label=next(labels, None),
+ **kwargs))
+ return r
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/texmanager.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/texmanager.py
new file mode 100644
index 0000000000000000000000000000000000000000..62aca98a32b69689f0b9ab961f264e2cd57ec866
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/texmanager.py
@@ -0,0 +1,340 @@
+r"""
+Support for embedded TeX expressions in Matplotlib.
+
+Requirements:
+
+* LaTeX.
+* \*Agg backends: dvipng>=1.6.
+* PS backend: PSfrag, dvips, and Ghostscript>=9.0.
+* PDF and SVG backends: if LuaTeX is present, it will be used to speed up some
+ post-processing steps, but note that it is not used to parse the TeX string
+ itself (only LaTeX is supported).
+
+To enable TeX rendering of all text in your Matplotlib figure, set
+:rc:`text.usetex` to True.
+
+TeX and dvipng/dvips processing results are cached
+in ~/.matplotlib/tex.cache for reuse between sessions.
+
+`TexManager.get_rgba` can also be used to directly obtain raster output as RGBA
+NumPy arrays.
+"""
+
+import functools
+import hashlib
+import logging
+import os
+from pathlib import Path
+import subprocess
+from tempfile import TemporaryDirectory
+
+import numpy as np
+from packaging.version import parse as parse_version
+
+import matplotlib as mpl
+from matplotlib import _api, cbook, dviread, rcParams
+
+_log = logging.getLogger(__name__)
+
+
+def _usepackage_if_not_loaded(package, *, option=None):
+ """
+ Output LaTeX code that loads a package (possibly with an option) if it
+ hasn't been loaded yet.
+
+ LaTeX cannot load twice a package with different options, so this helper
+ can be used to protect against users loading arbitrary packages/options in
+ their custom preamble.
+ """
+ option = f"[{option}]" if option is not None else ""
+ return (
+ r"\makeatletter"
+ r"\@ifpackageloaded{%(package)s}{}{\usepackage%(option)s{%(package)s}}"
+ r"\makeatother"
+ ) % {"package": package, "option": option}
+
+
class TexManager:
    """
    Convert strings to dvi files using TeX, caching the results to a directory.

    Repeated calls to this constructor always return the same instance.
    """

    # On-disk cache directory shared by all instances; created in __new__.
    texcache = os.path.join(mpl.get_cachedir(), 'tex.cache')

    # In-memory cache of rendered alpha arrays, keyed by
    # (tex, fontconfig, fontsize, dpi) -- see get_grey().
    _grey_arrayd = {}
    _font_family = 'serif'
    _font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
    # Maps a lowercase font name to (LaTeX font abbreviation, preamble
    # command that selects it).
    _font_info = {
        'new century schoolbook': ('pnc', r'\renewcommand{\rmdefault}{pnc}'),
        'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
        'times': ('ptm', r'\usepackage{mathptmx}'),
        'palatino': ('ppl', r'\usepackage{mathpazo}'),
        'zapf chancery': ('pzc', r'\usepackage{chancery}'),
        'cursive': ('pzc', r'\usepackage{chancery}'),
        'charter': ('pch', r'\usepackage{charter}'),
        'serif': ('cmr', ''),
        'sans-serif': ('cmss', ''),
        'helvetica': ('phv', r'\usepackage{helvet}'),
        'avant garde': ('pag', r'\usepackage{avant}'),
        'courier': ('pcr', r'\usepackage{courier}'),
        # Loading the type1ec package ensures that cm-super is installed, which
        # is necessary for unicode computer modern. (It also allows the use of
        # computer modern at arbitrary sizes, but that's just a side effect.)
        'monospace': ('cmtt', r'\usepackage{type1ec}'),
        'computer modern roman': ('cmr', r'\usepackage{type1ec}'),
        'computer modern sans serif': ('cmss', r'\usepackage{type1ec}'),
        'computer modern typewriter': ('cmtt', r'\usepackage{type1ec}')}
    # Maps a concrete font name to its generic family (used when
    # rcParams['font.family'] names a font directly).
    _font_types = {
        'new century schoolbook': 'serif', 'bookman': 'serif',
        'times': 'serif', 'palatino': 'serif', 'charter': 'serif',
        'computer modern roman': 'serif', 'zapf chancery': 'cursive',
        'helvetica': 'sans-serif', 'avant garde': 'sans-serif',
        'computer modern sans serif': 'sans-serif',
        'courier': 'monospace', 'computer modern typewriter': 'monospace'}

    # Public (deprecated) aliases for the private class attributes above.
    grey_arrayd = _api.deprecate_privatize_attribute("3.5")
    font_family = _api.deprecate_privatize_attribute("3.5")
    font_families = _api.deprecate_privatize_attribute("3.5")
    font_info = _api.deprecate_privatize_attribute("3.5")

    @functools.lru_cache() # Always return the same instance.
    def __new__(cls):
        """Create (once) the singleton, ensuring the cache dir exists."""
        Path(cls.texcache).mkdir(parents=True, exist_ok=True)
        return object.__new__(cls)

    def get_font_config(self):
        """
        Return a string identifying the current font configuration.

        As a side effect, resolves ``rcParams['font.family']`` into
        ``self._font_family`` and builds ``self._font_preamble``.
        """
        ff = rcParams['font.family']
        ff_val = ff[0].lower() if len(ff) == 1 else None
        reduced_notation = False
        if len(ff) == 1 and ff_val in self._font_families:
            # A generic family name ('serif', ...) was given directly.
            self._font_family = ff_val
        elif len(ff) == 1 and ff_val in self._font_info:
            # A concrete font name was given; map it back to its family.
            reduced_notation = True
            self._font_family = self._font_types[ff_val]
        else:
            _log.info('font.family must be one of (%s) when text.usetex is '
                      'True. serif will be used by default.',
                      ', '.join(self._font_families))
            self._font_family = 'serif'

        fontconfig = [self._font_family]
        fonts = {}
        for font_family in self._font_families:
            if reduced_notation and self._font_family == font_family:
                fonts[font_family] = self._font_info[ff_val]
            else:
                # Pick the first usetex-compatible font listed for the family.
                for font in rcParams['font.' + font_family]:
                    if font.lower() in self._font_info:
                        fonts[font_family] = self._font_info[font.lower()]
                        _log.debug(
                            'family: %s, font: %s, info: %s',
                            font_family, font, self._font_info[font.lower()])
                        break
                    else:
                        _log.debug('%s font is not compatible with usetex.',
                                   font)
                else:
                    # NOTE(review): adjacent literals concatenate without a
                    # space, so this renders as "...fontfamily..." -- message
                    # text left untouched here.
                    _log.info('No LaTeX-compatible font found for the %s font'
                              'family in rcParams. Using default.',
                              font_family)
                    fonts[font_family] = self._font_info[font_family]
            fontconfig.append(fonts[font_family][0])
        # Add a hash of the latex preamble to fontconfig so that the
        # correct png is selected for strings rendered with same font and dpi
        # even if the latex preamble changes within the session
        preamble_bytes = self.get_custom_preamble().encode('utf-8')
        fontconfig.append(hashlib.md5(preamble_bytes).hexdigest())

        # The following packages and commands need to be included in the latex
        # file's preamble:
        cmd = {fonts[family][1]
               for family in ['serif', 'sans-serif', 'monospace']}
        if self._font_family == 'cursive':
            cmd.add(fonts['cursive'][1])
        cmd.add(r'\usepackage{type1cm}')
        self._font_preamble = '\n'.join(sorted(cmd))

        return ''.join(fontconfig)

    def get_basefile(self, tex, fontsize, dpi=None):
        """
        Return a filename based on a hash of the string, fontsize, and dpi.
        """
        s = ''.join([tex, self.get_font_config(), '%f' % fontsize,
                     self.get_custom_preamble(), str(dpi or '')])
        return os.path.join(
            self.texcache, hashlib.md5(s.encode('utf-8')).hexdigest())

    def get_font_preamble(self):
        """
        Return a string containing font configuration for the tex preamble.
        """
        return self._font_preamble

    def get_custom_preamble(self):
        """Return a string containing user additions to the tex preamble."""
        return rcParams['text.latex.preamble']

    def _get_preamble(self):
        """Assemble the full LaTeX preamble used for every rendered string."""
        return "\n".join([
            r"\documentclass{article}",
            # Pass-through \mathdefault, which is used in non-usetex mode to
            # use the default text font but was historically suppressed in
            # usetex mode.
            r"\newcommand{\mathdefault}[1]{#1}",
            self._font_preamble,
            r"\usepackage[utf8]{inputenc}",
            r"\DeclareUnicodeCharacter{2212}{\ensuremath{-}}",
            # geometry is loaded before the custom preamble as convert_psfrags
            # relies on a custom preamble to change the geometry.
            r"\usepackage[papersize=72in, margin=1in]{geometry}",
            self.get_custom_preamble(),
            # Use `underscore` package to take care of underscores in text
            # The [strings] option allows to use underscores in file names
            _usepackage_if_not_loaded("underscore", option="strings"),
            # Custom packages (e.g. newtxtext) may already have loaded textcomp
            # with different options.
            _usepackage_if_not_loaded("textcomp"),
        ])

    def make_tex(self, tex, fontsize):
        """
        Generate a tex file to render the tex string at a specific font size.

        Return the file name.
        """
        basefile = self.get_basefile(tex, fontsize)
        texfile = '%s.tex' % basefile
        # Wrap the string in the family-appropriate font switch command.
        fontcmd = {'sans-serif': r'{\sffamily %s}',
                   'monospace': r'{\ttfamily %s}'}.get(self._font_family,
                                                       r'{\rmfamily %s}')

        Path(texfile).write_text(
            r"""
%s
\pagestyle{empty}
\begin{document}
%% The empty hbox ensures that a page is printed even for empty inputs, except
%% when using psfrag which gets confused by it.
\fontsize{%f}{%f}%%
\ifdefined\psfrag\else\hbox{}\fi%%
%s
\end{document}
""" % (self._get_preamble(), fontsize, fontsize * 1.25, fontcmd % tex),
            encoding='utf-8')

        return texfile

    def _run_checked_subprocess(self, command, tex, *, cwd=None):
        """
        Run *command* (argv list), raising a descriptive RuntimeError if the
        executable is missing or exits nonzero; return its combined output.
        """
        _log.debug(cbook._pformat_subprocess(command))
        try:
            report = subprocess.check_output(
                command, cwd=cwd if cwd is not None else self.texcache,
                stderr=subprocess.STDOUT)
        except FileNotFoundError as exc:
            raise RuntimeError(
                'Failed to process string with tex because {} could not be '
                'found'.format(command[0])) from exc
        except subprocess.CalledProcessError as exc:
            raise RuntimeError(
                '{prog} was not able to process the following string:\n'
                '{tex!r}\n\n'
                'Here is the full report generated by {prog}:\n'
                '{exc}\n\n'.format(
                    prog=command[0],
                    tex=tex.encode('unicode_escape'),
                    exc=exc.output.decode('utf-8'))) from exc
        _log.debug(report)
        return report

    def make_dvi(self, tex, fontsize):
        """
        Generate a dvi file containing latex's layout of tex string.

        Return the file name.
        """
        basefile = self.get_basefile(tex, fontsize)
        dvifile = '%s.dvi' % basefile
        if not os.path.exists(dvifile):
            texfile = Path(self.make_tex(tex, fontsize))
            # Generate the dvi in a temporary directory to avoid race
            # conditions e.g. if multiple processes try to process the same tex
            # string at the same time.  Having tmpdir be a subdirectory of the
            # final output dir ensures that they are on the same filesystem,
            # and thus replace() works atomically.  It also allows referring to
            # the texfile with a relative path (for pathological MPLCONFIGDIRs,
            # the absolute path may contain characters (e.g. ~) that TeX does
            # not support.)
            with TemporaryDirectory(dir=Path(dvifile).parent) as tmpdir:
                self._run_checked_subprocess(
                    ["latex", "-interaction=nonstopmode", "--halt-on-error",
                     f"../{texfile.name}"], tex, cwd=tmpdir)
                (Path(tmpdir) / Path(dvifile).name).replace(dvifile)
        return dvifile

    def make_png(self, tex, fontsize, dpi):
        """
        Generate a png file containing latex's rendering of tex string.

        Return the file name.
        """
        basefile = self.get_basefile(tex, fontsize, dpi)
        pngfile = '%s.png' % basefile
        # see get_rgba for a discussion of the background
        if not os.path.exists(pngfile):
            dvifile = self.make_dvi(tex, fontsize)
            cmd = ["dvipng", "-bg", "Transparent", "-D", str(dpi),
                   "-T", "tight", "-o", pngfile, dvifile]
            # When testing, disable FreeType rendering for reproducibility; but
            # dvipng 1.16 has a bug (fixed in f3ff241) that breaks --freetype0
            # mode, so for it we keep FreeType enabled; the image will be
            # slightly off.
            bad_ver = parse_version("1.16")
            if (getattr(mpl, "_called_from_pytest", False)
                    and mpl._get_executable_info("dvipng").version != bad_ver):
                cmd.insert(1, "--freetype0")
            self._run_checked_subprocess(cmd, tex)
        return pngfile

    def get_grey(self, tex, fontsize=None, dpi=None):
        """Return the alpha channel."""
        if not fontsize:
            fontsize = rcParams['font.size']
        if not dpi:
            dpi = rcParams['savefig.dpi']
        key = tex, self.get_font_config(), fontsize, dpi
        alpha = self._grey_arrayd.get(key)
        if alpha is None:
            # Not cached yet: rasterize and keep only the alpha channel.
            pngfile = self.make_png(tex, fontsize, dpi)
            rgba = mpl.image.imread(os.path.join(self.texcache, pngfile))
            self._grey_arrayd[key] = alpha = rgba[:, :, -1]
        return alpha

    def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0, 0, 0)):
        r"""
        Return latex's rendering of the tex string as an rgba array.

        Examples
        --------
        >>> texmanager = TexManager()
        >>> s = r"\TeX\ is $\displaystyle\sum_n\frac{-e^{i\pi}}{2^n}$!"
        >>> Z = texmanager.get_rgba(s, fontsize=12, dpi=80, rgb=(1, 0, 0))
        """
        # Color is applied uniformly; only the alpha varies per pixel.
        alpha = self.get_grey(tex, fontsize, dpi)
        rgba = np.empty((*alpha.shape, 4))
        rgba[..., :3] = mpl.colors.to_rgb(rgb)
        rgba[..., -1] = alpha
        return rgba

    def get_text_width_height_descent(self, tex, fontsize, renderer=None):
        """Return width, height and descent of the text."""
        if tex.strip() == '':
            return 0, 0, 0
        dvifile = self.make_dvi(tex, fontsize)
        # Scale from TeX points (72/inch) to the renderer's pixels, if any.
        dpi_fraction = renderer.points_to_pixels(1.) if renderer else 1
        with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:
            page, = dvi
        # A total height (including the descent) needs to be returned.
        return page.width, page.height + page.descent, page.descent
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/tight_bbox.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/tight_bbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..5904ebc1fa1ce6d71ac32fa6e780dcc7fbe4ceca
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/tight_bbox.py
@@ -0,0 +1,88 @@
+"""
+Helper module for the *bbox_inches* parameter in `.Figure.savefig`.
+"""
+
+from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
+
+
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.

    It modifies fig.bbox, fig.bbox_inches,
    fig.transFigure._boxout, and fig.patch.  While the figure size
    changes, the scale of the original figure is conserved.  A
    function which restores the original values are returned.
    """
    # Snapshot everything that will be mutated so restore_bbox can undo it.
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    orig_tight_layout = fig.get_tight_layout()
    _boxout = fig.transFigure._boxout

    fig.set_tight_layout(False)

    old_aspect = []
    locator_list = []
    # Sentinel marks axes that had no instance-level apply_aspect override.
    sentinel = object()
    for ax in fig.axes:
        locator_list.append(ax.get_axes_locator())
        # Freeze each Axes at its current position via a constant locator;
        # the default _pos binds the *current* value, not the loop variable.
        current_pos = ax.get_position(original=False).frozen()
        ax.set_axes_locator(lambda a, r, _pos=current_pos: _pos)
        # override the method that enforces the aspect ratio on the Axes
        if 'apply_aspect' in ax.__dict__:
            old_aspect.append(ax.apply_aspect)
        else:
            old_aspect.append(sentinel)
        ax.apply_aspect = lambda pos=None: None

    def restore_bbox():
        # Undo, in order: per-axes locators/aspect, then figure-level state.
        for ax, loc, aspect in zip(fig.axes, locator_list, old_aspect):
            ax.set_axes_locator(loc)
            if aspect is sentinel:
                # delete our no-op function which un-hides the original method
                del ax.apply_aspect
            else:
                ax.apply_aspect = aspect

        fig.bbox = origBbox
        fig.bbox_inches = origBboxInches
        fig.set_tight_layout(orig_tight_layout)
        fig.transFigure._boxout = _boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)

    if fixed_dpi is None:
        fixed_dpi = fig.dpi
    # Transform from inches to output pixels at the requested dpi.
    tr = Affine2D().scale(fixed_dpi)
    dpi_scale = fixed_dpi / fig.dpi

    _bbox = TransformedBbox(bbox_inches, tr)

    fig.bbox_inches = Bbox.from_bounds(0, 0,
                                       bbox_inches.width, bbox_inches.height)
    # Shift the figure's pixel box so the requested area lands at the origin.
    x0, y0 = _bbox.x0, _bbox.y0
    w1, h1 = fig.bbox.width * dpi_scale, fig.bbox.height * dpi_scale
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()

    fig.bbox = TransformedBbox(fig.bbox_inches, tr)

    # Reposition the background patch to cover only the cropped area.
    fig.patch.set_bounds(x0 / w1, y0 / h1,
                         fig.bbox.width / w1, fig.bbox.height / h1)

    return restore_bbox
+
+
def process_figure_for_rasterizing(fig, bbox_inches_restore, fixed_dpi=None):
    """
    A function that needs to be called when figure dpi changes during the
    drawing (e.g., rasterizing).  It recovers the bbox and re-adjust it with
    the new dpi.
    """
    bbox_inches, restore = bbox_inches_restore
    # Undo the previous adjustment, then redo it at the new dpi.
    restore()
    return bbox_inches, adjust_bbox(fig, bbox_inches, fixed_dpi)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/tight_layout.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/tight_layout.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccd4b928af70e4ccf6b73a339a34500c9a54e9cf
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/tight_layout.py
@@ -0,0 +1,355 @@
+"""
+Routines to adjust subplot params so that subplots are
+nicely fit in the figure. In doing so, only axis labels, tick labels, axes
+titles and offsetboxes that are anchored to axes are currently considered.
+
+Internally, this module assumes that the margins (left margin, etc.) which are
+differences between ``Axes.get_tightbbox`` and ``Axes.bbox`` are independent of
+Axes position. This may fail if ``Axes.adjustable`` is ``datalim`` as well as
+such cases as when left or right margin are affected by xlabel.
+"""
+
+import numpy as np
+
+from matplotlib import _api, rcParams
+from matplotlib.font_manager import FontProperties
+from matplotlib.transforms import TransformedBbox, Bbox
+
+
def _auto_adjust_subplotpars(
        fig, renderer, shape, span_pairs, subplot_list,
        ax_bbox_list=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Return a dict of subplot parameters to adjust spacing between subplots
    or ``None`` if resulting axes would have zero height or width.

    Note that this function ignores geometry information of subplot itself, but
    uses what is given by the *shape* and *subplot_list* parameters.  Also, the
    results could be incorrect if some subplots have ``adjustable=datalim``.

    Parameters
    ----------
    shape : tuple[int, int]
        Number of rows and columns of the grid.
    span_pairs : list[tuple[slice, slice]]
        List of rowspans and colspans occupied by each subplot.
    subplot_list : list of subplots
        List of subplots that will be used to calculate optimal subplot_params.
    pad : float
        Padding between the figure edge and the edges of subplots, as a
        fraction of the font size.
    h_pad, w_pad : float
        Padding (height/width) between edges of adjacent subplots, as a
        fraction of the font size.  Defaults to *pad*.
    rect : tuple[float, float, float, float]
        [left, bottom, right, top] in normalized (0, 1) figure coordinates.

    Returns
    -------
    dict or None
        Keyword arguments for `.Figure.subplots_adjust`, or ``None`` if the
        required margins leave no room for the axes.
    """
    rows, cols = shape

    # Convert the pads from font-size fractions to inches.
    font_size_inch = (
        FontProperties(size=rcParams["font.size"]).get_size_in_points() / 72)
    pad_inch = pad * font_size_inch
    vpad_inch = h_pad * font_size_inch if h_pad is not None else pad_inch
    hpad_inch = w_pad * font_size_inch if w_pad is not None else pad_inch

    if len(span_pairs) != len(subplot_list) or len(subplot_list) == 0:
        raise ValueError

    if rect is None:
        margin_left = margin_bottom = margin_right = margin_top = None
    else:
        # Convert rect's right/top edges into margins measured inward.
        margin_left, margin_bottom, _right, _top = rect
        margin_right = 1 - _right if _right else None
        margin_top = 1 - _top if _top else None

    # Accumulated decoration extents at each grid boundary, in figure coords.
    vspaces = np.zeros((rows + 1, cols))
    hspaces = np.zeros((rows, cols + 1))

    if ax_bbox_list is None:
        ax_bbox_list = [
            Bbox.union([ax.get_position(original=True) for ax in subplots])
            for subplots in subplot_list]

    for subplots, ax_bbox, (rowspan, colspan) in zip(
            subplot_list, ax_bbox_list, span_pairs):
        if all(not ax.get_visible() for ax in subplots):
            continue

        bb = []
        for ax in subplots:
            if ax.get_visible():
                # for_layout_only is not available on older Axes classes.
                try:
                    bb += [ax.get_tightbbox(renderer, for_layout_only=True)]
                except TypeError:
                    bb += [ax.get_tightbbox(renderer)]

        tight_bbox_raw = Bbox.union(bb)
        tight_bbox = TransformedBbox(tight_bbox_raw,
                                     fig.transFigure.inverted())

        # Record how far decorations stick out on each side of the axes box.
        hspaces[rowspan, colspan.start] += ax_bbox.xmin - tight_bbox.xmin # l
        hspaces[rowspan, colspan.stop] += tight_bbox.xmax - ax_bbox.xmax # r
        vspaces[rowspan.start, colspan] += tight_bbox.ymax - ax_bbox.ymax # t
        vspaces[rowspan.stop, colspan] += ax_bbox.ymin - tight_bbox.ymin # b

    fig_width_inch, fig_height_inch = fig.get_size_inches()

    # margins can be negative for axes with aspect applied, so use max(, 0) to
    # make them nonnegative.
    if not margin_left:
        margin_left = max(hspaces[:, 0].max(), 0) + pad_inch/fig_width_inch
        suplabel = fig._supylabel
        if suplabel and suplabel.get_in_layout():
            rel_width = fig.transFigure.inverted().transform_bbox(
                suplabel.get_window_extent(renderer)).width
            margin_left += rel_width + pad_inch/fig_width_inch
    if not margin_right:
        margin_right = max(hspaces[:, -1].max(), 0) + pad_inch/fig_width_inch
    if not margin_top:
        margin_top = max(vspaces[0, :].max(), 0) + pad_inch/fig_height_inch
        if fig._suptitle and fig._suptitle.get_in_layout():
            rel_height = fig.transFigure.inverted().transform_bbox(
                fig._suptitle.get_window_extent(renderer)).height
            margin_top += rel_height + pad_inch/fig_height_inch
    if not margin_bottom:
        margin_bottom = max(vspaces[-1, :].max(), 0) + pad_inch/fig_height_inch
        suplabel = fig._supxlabel
        if suplabel and suplabel.get_in_layout():
            rel_height = fig.transFigure.inverted().transform_bbox(
                suplabel.get_window_extent(renderer)).height
            margin_bottom += rel_height + pad_inch/fig_height_inch

    # Bail out (returning None) if the margins alone consume the figure.
    if margin_left + margin_right >= 1:
        _api.warn_external('Tight layout not applied. The left and right '
                           'margins cannot be made large enough to '
                           'accommodate all axes decorations.')
        return None
    if margin_bottom + margin_top >= 1:
        _api.warn_external('Tight layout not applied. The bottom and top '
                           'margins cannot be made large enough to '
                           'accommodate all axes decorations.')
        return None

    kwargs = dict(left=margin_left,
                  right=1 - margin_right,
                  bottom=margin_bottom,
                  top=1 - margin_top)

    if cols > 1:
        # Inter-column gap: widest interior decoration plus the pad.
        hspace = hspaces[:, 1:-1].max() + hpad_inch / fig_width_inch
        # axes widths:
        h_axes = (1 - margin_right - margin_left - hspace * (cols - 1)) / cols
        if h_axes < 0:
            _api.warn_external('Tight layout not applied. tight_layout '
                               'cannot make axes width small enough to '
                               'accommodate all axes decorations')
            return None
        else:
            # wspace/hspace are expressed relative to the axes size.
            kwargs["wspace"] = hspace / h_axes
    if rows > 1:
        vspace = vspaces[1:-1, :].max() + vpad_inch / fig_height_inch
        v_axes = (1 - margin_top - margin_bottom - vspace * (rows - 1)) / rows
        if v_axes < 0:
            _api.warn_external('Tight layout not applied. tight_layout '
                               'cannot make axes height small enough to '
                               'accommodate all axes decorations.')
            return None
        else:
            kwargs["hspace"] = vspace / v_axes

    return kwargs
+
+
@_api.deprecated("3.5")
def auto_adjust_subplotpars(
        fig, renderer, nrows_ncols, num1num2_list, subplot_list,
        ax_bbox_list=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Return a dict of subplot parameters to adjust spacing between subplots
    or ``None`` if resulting axes would have zero height or width.

    Note that this function ignores geometry information of subplot
    itself, but uses what is given by the *nrows_ncols* and *num1num2_list*
    parameters.  Also, the results could be incorrect if some subplots have
    ``adjustable=datalim``.

    Parameters
    ----------
    nrows_ncols : tuple[int, int]
        Number of rows and number of columns of the grid.
    num1num2_list : list[tuple[int, int]]
        List of numbers specifying the area occupied by the subplot
    subplot_list : list of subplots
        List of subplots that will be used to calculate optimal subplot_params.
    pad : float
        Padding between the figure edge and the edges of subplots, as a
        fraction of the font size.
    h_pad, w_pad : float
        Padding (height/width) between edges of adjacent subplots, as a
        fraction of the font size.  Defaults to *pad*.
    rect : tuple[float, float, float, float]
        [left, bottom, right, top] in normalized (0, 1) figure coordinates.
    """
    nrows, ncols = nrows_ncols
    # Translate flat (num1, num2) cell indices into (rowspan, colspan) slices.
    span_pairs = []
    for n1, n2 in num1num2_list:
        if n2 is None:
            n2 = n1
        span_pairs.append((slice(n1 // ncols, n2 // ncols + 1),
                           slice(n1 % ncols, n2 % ncols + 1)))
    # Bug fix: forward the computed *span_pairs* (not the raw
    # *num1num2_list*) -- the helper's signature is
    # (fig, renderer, shape, span_pairs, subplot_list, ...).
    return _auto_adjust_subplotpars(
        fig, renderer, nrows_ncols, span_pairs, subplot_list,
        ax_bbox_list, pad, h_pad, w_pad, rect)
+
+
def get_renderer(fig):
    """Return the figure's cached renderer, or obtain one from its canvas."""
    renderer = fig._cachedRenderer
    if renderer:
        return renderer
    canvas = fig.canvas
    if canvas and hasattr(canvas, "get_renderer"):
        return canvas.get_renderer()
    # No canvas renderer available; fall back to a backend-provided one.
    from . import backend_bases
    return backend_bases._get_renderer(fig)
+
+
def get_subplotspec_list(axes_list, grid_spec=None):
    """
    Return a list of subplotspec from the given list of axes.

    For an instance of axes that does not support subplotspec, None is inserted
    in the list.

    If grid_spec is given, None is inserted for those not from the given
    grid_spec.
    """
    result = []
    for ax in axes_list:
        locator = ax.get_axes_locator()
        # Prefer the locator (it may carry the subplotspec); else the axes.
        candidate = ax if locator is None else locator

        ss = None
        if hasattr(candidate, "get_subplotspec"):
            ss = candidate.get_subplotspec()
            if ss is not None:
                ss = ss.get_topmost_subplotspec()
                gs = ss.get_gridspec()
                if grid_spec is not None:
                    if gs != grid_spec:
                        ss = None
                elif gs.locally_modified_subplot_params():
                    ss = None

        result.append(ss)

    return result
+
+
def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
                            pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Return subplot parameters for tight-layouted-figure with specified padding.

    Parameters
    ----------
    fig : Figure
    axes_list : list of Axes
    subplotspec_list : list of `.SubplotSpec`
        The subplotspecs of each axes.
    renderer : renderer
    pad : float
        Padding between the figure edge and the edges of subplots, as a
        fraction of the font size.
    h_pad, w_pad : float
        Padding (height/width) between edges of adjacent subplots.  Defaults to
        *pad*.
    rect : tuple[float, float, float, float], optional
        (left, bottom, right, top) rectangle in normalized figure coordinates
        that the whole subplots area (including labels) will fit into.
        Defaults to using the entire figure.

    Returns
    -------
    subplotspec or None
        subplotspec kwargs to be passed to `.Figure.subplots_adjust` or
        None if tight_layout could not be accomplished.
    """

    # Multiple axes can share same subplotspec (e.g., if using axes_grid1);
    # we need to group them together.
    ss_to_subplots = {ss: [] for ss in subplotspec_list}
    for ax, ss in zip(axes_list, subplotspec_list):
        ss_to_subplots[ss].append(ax)
    ss_to_subplots.pop(None, None) # Skip subplotspec == None.
    if not ss_to_subplots:
        return {}
    subplot_list = list(ss_to_subplots.values())
    ax_bbox_list = [ss.get_position(fig) for ss in ss_to_subplots]

    # Work on the finest grid present; coarser grids are scaled up to it.
    max_nrows = max(ss.get_gridspec().nrows for ss in ss_to_subplots)
    max_ncols = max(ss.get_gridspec().ncols for ss in ss_to_subplots)

    span_pairs = []
    for ss in ss_to_subplots:
        # The intent here is to support axes from different gridspecs where
        # one's nrows (or ncols) is a multiple of the other (e.g. 2 and 4),
        # but this doesn't actually work because the computed wspace, in
        # relative-axes-height, corresponds to different physical spacings for
        # the 2-row grid and the 4-row grid.  Still, this code is left, mostly
        # for backcompat.
        rows, cols = ss.get_gridspec().get_geometry()
        div_row, mod_row = divmod(max_nrows, rows)
        div_col, mod_col = divmod(max_ncols, cols)
        if mod_row != 0:
            _api.warn_external('tight_layout not applied: number of rows '
                               'in subplot specifications must be '
                               'multiples of one another.')
            return {}
        if mod_col != 0:
            _api.warn_external('tight_layout not applied: number of '
                               'columns in subplot specifications must be '
                               'multiples of one another.')
            return {}
        # Scale this spec's spans onto the common (max_nrows, max_ncols) grid.
        span_pairs.append((
            slice(ss.rowspan.start * div_row, ss.rowspan.stop * div_row),
            slice(ss.colspan.start * div_col, ss.colspan.stop * div_col)))

    kwargs = _auto_adjust_subplotpars(fig, renderer,
                                      shape=(max_nrows, max_ncols),
                                      span_pairs=span_pairs,
                                      subplot_list=subplot_list,
                                      ax_bbox_list=ax_bbox_list,
                                      pad=pad, h_pad=h_pad, w_pad=w_pad)

    # kwargs can be none if tight_layout fails...
    if rect is not None and kwargs is not None:
        # if rect is given, the whole subplots area (including
        # labels) will fit into the rect instead of the
        # figure. Note that the rect argument of
        # *auto_adjust_subplotpars* specify the area that will be
        # covered by the total area of axes.bbox. Thus we call
        # auto_adjust_subplotpars twice, where the second run
        # with adjusted rect parameters.

        left, bottom, right, top = rect
        if left is not None:
            left += kwargs["left"]
        if bottom is not None:
            bottom += kwargs["bottom"]
        if right is not None:
            right -= (1 - kwargs["right"])
        if top is not None:
            top -= (1 - kwargs["top"])

        kwargs = _auto_adjust_subplotpars(fig, renderer,
                                          shape=(max_nrows, max_ncols),
                                          span_pairs=span_pairs,
                                          subplot_list=subplot_list,
                                          ax_bbox_list=ax_bbox_list,
                                          pad=pad, h_pad=h_pad, w_pad=w_pad,
                                          rect=(left, bottom, right, top))

    return kwargs
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/flax.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/flax.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0b8375e038eff487af33fcfaa4a597aacb5743f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/flax.py
@@ -0,0 +1,138 @@
+import os
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+import jax.numpy as jnp
+from jax import Array
+from safetensors import numpy, safe_open
+
+
def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]] = None) -> bytes:
    """
    Serialize a dictionary of flax tensors to safetensors-formatted bytes.

    Args:
        tensors (`Dict[str, Array]`):
            Name-to-tensor mapping; tensors must be contiguous and dense.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header.  Purely
            informative; it does not affect how tensors are loaded.

    Returns:
        `bytes`: The raw bytes representing the format

    Example:

    ```python
    from safetensors.flax import save
    from jax import numpy as jnp

    tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))}
    byte_data = save(tensors)
    ```
    """
    # Convert to NumPy first, then delegate to the numpy backend.
    return numpy.save(_jnp2np(tensors), metadata=metadata)
+
+
def save_file(
    tensors: Dict[str, Array],
    filename: Union[str, os.PathLike],
    metadata: Optional[Dict[str, str]] = None,
) -> None:
    """
    Write a dictionary of flax tensors to a file in safetensors format.

    Args:
        tensors (`Dict[str, Array]`):
            Name-to-tensor mapping; tensors must be contiguous and dense.
        filename (`str`, or `os.PathLike`)):
            The filename we're saving into.
        metadata (`Dict[str, str]`, *optional*, defaults to `None`):
            Free-form text metadata stored in the header.  Purely
            informative; it does not affect how tensors are loaded.

    Returns:
        `None`

    Example:

    ```python
    from safetensors.flax import save_file
    from jax import numpy as jnp

    tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))}
    save_file(tensors, "model.safetensors")
    ```
    """
    # Convert to NumPy first, then delegate to the numpy backend.
    return numpy.save_file(_jnp2np(tensors), filename, metadata=metadata)
+
+
def load(data: bytes) -> Dict[str, Array]:
    """
    Deserialize safetensors bytes into a dictionary of flax tensors.

    Args:
        data (`bytes`):
            The content of a safetensors file

    Returns:
        `Dict[str, Array]`: dictionary that contains name as key, value as `Array` on cpu

    Example:

    ```python
    from safetensors.flax import load

    file_path = "./my_folder/bert.safetensors"
    with open(file_path, "rb") as f:
        data = f.read()

    loaded = load(data)
    ```
    """
    # Parse with the numpy backend, then lift the arrays into jax.
    return _np2jnp(numpy.load(data))
+
+
def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]:
    """
    Load a safetensors file from disk as a dictionary of flax tensors.

    Args:
        filename (`str`, or `os.PathLike`)):
            The name of the file which contains the tensors

    Returns:
        `Dict[str, Array]`: dictionary that contains name as key, value as `Array`

    Example:

    ```python
    from safetensors.flax import load_file

    file_path = "./my_folder/bert.safetensors"
    loaded = load_file(file_path)
    ```
    """
    # safe_open memory-maps the file; materialize each tensor by name.
    with safe_open(filename, framework="flax") as f:
        return {name: f.get_tensor(name) for name in f.keys()}
+
+
+def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]:
+ for k, v in numpy_dict.items():
+ numpy_dict[k] = jnp.array(v)
+ return numpy_dict
+
+
+def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]:
+ for k, v in jnp_dict.items():
+ jnp_dict[k] = np.asarray(v)
+ return jnp_dict
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/mlx.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/mlx.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf9fe37519c817e4d9db87e8ce53c2dc8b85254f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/mlx.py
@@ -0,0 +1,138 @@
+import os
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+import mlx.core as mx
+from safetensors import numpy, safe_open
+
+
+def save(tensors: Dict[str, mx.array], metadata: Optional[Dict[str, str]] = None) -> bytes:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensors (`Dict[str, mx.array]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `bytes`: The raw bytes representing the format
+
+ Example:
+
+ ```python
+ from safetensors.mlx import save
+ import mlx.core as mx
+
+ tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))}
+ byte_data = save(tensors)
+ ```
+ """
+ np_tensors = _mx2np(tensors)
+ return numpy.save(np_tensors, metadata=metadata)
+
+
+def save_file(
+ tensors: Dict[str, mx.array],
+ filename: Union[str, os.PathLike],
+ metadata: Optional[Dict[str, str]] = None,
+) -> None:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensors (`Dict[str, mx.array]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+        filename (`str`, or `os.PathLike`):
+ The filename we're saving into.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `None`
+
+ Example:
+
+ ```python
+ from safetensors.mlx import save_file
+ import mlx.core as mx
+
+ tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))}
+ save_file(tensors, "model.safetensors")
+ ```
+ """
+ np_tensors = _mx2np(tensors)
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
+
+
+def load(data: bytes) -> Dict[str, mx.array]:
+ """
+ Loads a safetensors file into MLX format from pure bytes.
+
+ Args:
+ data (`bytes`):
+ The content of a safetensors file
+
+ Returns:
+ `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array`
+
+ Example:
+
+ ```python
+ from safetensors.mlx import load
+
+ file_path = "./my_folder/bert.safetensors"
+ with open(file_path, "rb") as f:
+ data = f.read()
+
+ loaded = load(data)
+ ```
+ """
+ flat = numpy.load(data)
+ return _np2mx(flat)
+
+
+def load_file(filename: Union[str, os.PathLike]) -> Dict[str, mx.array]:
+ """
+ Loads a safetensors file into MLX format.
+
+ Args:
+        filename (`str`, or `os.PathLike`):
+ The name of the file which contains the tensors
+
+ Returns:
+ `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array`
+
+ Example:
+
+ ```python
+    from safetensors.mlx import load_file
+
+ file_path = "./my_folder/bert.safetensors"
+ loaded = load_file(file_path)
+ ```
+ """
+ result = {}
+ with safe_open(filename, framework="mlx") as f:
+ for k in f.keys():
+ result[k] = f.get_tensor(k)
+ return result
+
+
+def _np2mx(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, mx.array]:
+ for k, v in numpy_dict.items():
+ numpy_dict[k] = mx.array(v)
+ return numpy_dict
+
+
+def _mx2np(mx_dict: Dict[str, mx.array]) -> Dict[str, np.array]:
+ new_dict = {}
+ for k, v in mx_dict.items():
+ new_dict[k] = np.asarray(v)
+ return new_dict
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/numpy.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/numpy.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b245f12c1c949456c9b2edb45a11343e6a8099a
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/numpy.py
@@ -0,0 +1,176 @@
+import os
+import sys
+from typing import Dict, Optional, Union
+
+import numpy as np
+
+from safetensors import deserialize, safe_open, serialize, serialize_file
+
+
+def _tobytes(tensor: np.ndarray) -> bytes:
+ if not _is_little_endian(tensor):
+ tensor = tensor.byteswap(inplace=False)
+ return tensor.tobytes()
+
+
+def save(tensor_dict: Dict[str, np.ndarray], metadata: Optional[Dict[str, str]] = None) -> bytes:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensor_dict (`Dict[str, np.ndarray]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `bytes`: The raw bytes representing the format
+
+ Example:
+
+ ```python
+ from safetensors.numpy import save
+ import numpy as np
+
+ tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))}
+ byte_data = save(tensors)
+ ```
+ """
+ flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()}
+ serialized = serialize(flattened, metadata=metadata)
+ result = bytes(serialized)
+ return result
+
+
+def save_file(
+ tensor_dict: Dict[str, np.ndarray], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]] = None
+) -> None:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensor_dict (`Dict[str, np.ndarray]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+        filename (`str`, or `os.PathLike`):
+ The filename we're saving into.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `None`
+
+ Example:
+
+ ```python
+ from safetensors.numpy import save_file
+ import numpy as np
+
+ tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))}
+ save_file(tensors, "model.safetensors")
+ ```
+ """
+ flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()}
+ serialize_file(flattened, filename, metadata=metadata)
+
+
+def load(data: bytes) -> Dict[str, np.ndarray]:
+ """
+ Loads a safetensors file into numpy format from pure bytes.
+
+ Args:
+ data (`bytes`):
+ The content of a safetensors file
+
+ Returns:
+ `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` on cpu
+
+ Example:
+
+ ```python
+ from safetensors.numpy import load
+
+ file_path = "./my_folder/bert.safetensors"
+ with open(file_path, "rb") as f:
+ data = f.read()
+
+ loaded = load(data)
+ ```
+ """
+ flat = deserialize(data)
+ return _view2np(flat)
+
+
+def load_file(filename: Union[str, os.PathLike]) -> Dict[str, np.ndarray]:
+ """
+ Loads a safetensors file into numpy format.
+
+ Args:
+        filename (`str`, or `os.PathLike`):
+ The name of the file which contains the tensors
+
+ Returns:
+ `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray`
+
+ Example:
+
+ ```python
+ from safetensors.numpy import load_file
+
+ file_path = "./my_folder/bert.safetensors"
+ loaded = load_file(file_path)
+ ```
+ """
+ result = {}
+ with safe_open(filename, framework="np") as f:
+ for k in f.keys():
+ result[k] = f.get_tensor(k)
+ return result
+
+
+_TYPES = {
+ "F64": np.float64,
+ "F32": np.float32,
+ "F16": np.float16,
+ "I64": np.int64,
+ "U64": np.uint64,
+ "I32": np.int32,
+ "U32": np.uint32,
+ "I16": np.int16,
+ "U16": np.uint16,
+ "I8": np.int8,
+ "U8": np.uint8,
+ "BOOL": bool,
+}
+
+
+def _getdtype(dtype_str: str) -> np.dtype:
+ return _TYPES[dtype_str]
+
+
+def _view2np(safeview) -> Dict[str, np.ndarray]:
+ result = {}
+ for k, v in safeview:
+ dtype = _getdtype(v["dtype"])
+ arr = np.frombuffer(v["data"], dtype=dtype).reshape(v["shape"])
+ result[k] = arr
+ return result
+
+
+def _is_little_endian(tensor: np.ndarray) -> bool:
+ byteorder = tensor.dtype.byteorder
+ if byteorder == "=":
+ if sys.byteorder == "little":
+ return True
+ else:
+ return False
+ elif byteorder == "|":
+ return True
+ elif byteorder == "<":
+ return True
+ elif byteorder == ">":
+ return False
+ raise ValueError(f"Unexpected byte order {byteorder}")
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/py.typed b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/tensorflow.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/tensorflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2d74b0522698b3748a7da93753e065f4053beea
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/tensorflow.py
@@ -0,0 +1,137 @@
+import os
+from typing import Dict, Optional, Union
+
+import numpy as np
+import tensorflow as tf
+
+from safetensors import numpy, safe_open
+
+
+def save(tensors: Dict[str, tf.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensors (`Dict[str, tf.Tensor]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `bytes`: The raw bytes representing the format
+
+ Example:
+
+ ```python
+ from safetensors.tensorflow import save
+ import tensorflow as tf
+
+ tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))}
+ byte_data = save(tensors)
+ ```
+ """
+ np_tensors = _tf2np(tensors)
+ return numpy.save(np_tensors, metadata=metadata)
+
+
+def save_file(
+ tensors: Dict[str, tf.Tensor],
+ filename: Union[str, os.PathLike],
+ metadata: Optional[Dict[str, str]] = None,
+) -> None:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensors (`Dict[str, tf.Tensor]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+        filename (`str`, or `os.PathLike`):
+ The filename we're saving into.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `None`
+
+ Example:
+
+ ```python
+ from safetensors.tensorflow import save_file
+ import tensorflow as tf
+
+ tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))}
+ save_file(tensors, "model.safetensors")
+ ```
+ """
+ np_tensors = _tf2np(tensors)
+ return numpy.save_file(np_tensors, filename, metadata=metadata)
+
+
+def load(data: bytes) -> Dict[str, tf.Tensor]:
+ """
+ Loads a safetensors file into tensorflow format from pure bytes.
+
+ Args:
+ data (`bytes`):
+ The content of a safetensors file
+
+ Returns:
+ `Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor` on cpu
+
+ Example:
+
+ ```python
+ from safetensors.tensorflow import load
+
+ file_path = "./my_folder/bert.safetensors"
+ with open(file_path, "rb") as f:
+ data = f.read()
+
+ loaded = load(data)
+ ```
+ """
+ flat = numpy.load(data)
+ return _np2tf(flat)
+
+
+def load_file(filename: Union[str, os.PathLike]) -> Dict[str, tf.Tensor]:
+ """
+ Loads a safetensors file into tensorflow format.
+
+ Args:
+        filename (`str`, or `os.PathLike`):
+ The name of the file which contains the tensors
+
+ Returns:
+ `Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor`
+
+ Example:
+
+ ```python
+ from safetensors.tensorflow import load_file
+
+ file_path = "./my_folder/bert.safetensors"
+ loaded = load_file(file_path)
+ ```
+ """
+ result = {}
+ with safe_open(filename, framework="tf") as f:
+ for k in f.keys():
+ result[k] = f.get_tensor(k)
+ return result
+
+
+def _np2tf(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, tf.Tensor]:
+ for k, v in numpy_dict.items():
+ numpy_dict[k] = tf.convert_to_tensor(v)
+ return numpy_dict
+
+
+def _tf2np(tf_dict: Dict[str, tf.Tensor]) -> Dict[str, np.array]:
+ for k, v in tf_dict.items():
+ tf_dict[k] = v.numpy()
+ return tf_dict
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/torch.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/torch.py
new file mode 100644
index 0000000000000000000000000000000000000000..22915c98a5e002e829c67739499b355c406c9e6d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/safetensors/torch.py
@@ -0,0 +1,492 @@
+import os
+import sys
+from collections import defaultdict
+from typing import Any, Dict, List, Optional, Set, Tuple, Union
+
+import torch
+
+from safetensors import deserialize, safe_open, serialize, serialize_file
+
+
+def storage_ptr(tensor: torch.Tensor) -> int:
+ try:
+ return tensor.untyped_storage().data_ptr()
+ except Exception:
+ # Fallback for torch==1.10
+ try:
+ return tensor.storage().data_ptr()
+ except NotImplementedError:
+ # Fallback for meta storage
+ return 0
+
+
+def _end_ptr(tensor: torch.Tensor) -> int:
+ if tensor.nelement():
+ stop = tensor.view(-1)[-1].data_ptr() + _SIZE[tensor.dtype]
+ else:
+ stop = tensor.data_ptr()
+ return stop
+
+
+def storage_size(tensor: torch.Tensor) -> int:
+ try:
+ return tensor.untyped_storage().nbytes()
+ except AttributeError:
+ # Fallback for torch==1.10
+ try:
+ return tensor.storage().size() * _SIZE[tensor.dtype]
+ except NotImplementedError:
+ # Fallback for meta storage
+ # On torch >=2.0 this is the tensor size
+ return tensor.nelement() * _SIZE[tensor.dtype]
+
+
+def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]:
+ filtered_tensors = []
+ for shared in tensors:
+ if len(shared) < 2:
+ filtered_tensors.append(shared)
+ continue
+
+ areas = []
+ for name in shared:
+ tensor = state_dict[name]
+ areas.append((tensor.data_ptr(), _end_ptr(tensor), name))
+ areas.sort()
+
+ _, last_stop, last_name = areas[0]
+ filtered_tensors.append({last_name})
+ for start, stop, name in areas[1:]:
+ if start >= last_stop:
+ filtered_tensors.append({name})
+ else:
+ filtered_tensors[-1].add(name)
+ last_stop = stop
+
+ return filtered_tensors
+
+
+def _find_shared_tensors(state_dict: Dict[str, torch.Tensor]) -> List[Set[str]]:
+ tensors = defaultdict(set)
+ for k, v in state_dict.items():
+ if v.device != torch.device("meta") and storage_ptr(v) != 0 and storage_size(v) != 0:
+ # Need to add device as key because of multiple GPU.
+ tensors[(v.device, storage_ptr(v), storage_size(v))].add(k)
+ tensors = list(sorted(tensors.values()))
+ tensors = _filter_shared_not_shared(tensors, state_dict)
+ return tensors
+
+
+def _is_complete(tensor: torch.Tensor) -> bool:
+ return tensor.data_ptr() == storage_ptr(tensor) and tensor.nelement() * _SIZE[tensor.dtype] == storage_size(tensor)
+
+
+def _remove_duplicate_names(
+ state_dict: Dict[str, torch.Tensor],
+ *,
+ preferred_names: Optional[List[str]] = None,
+ discard_names: Optional[List[str]] = None,
+) -> Dict[str, List[str]]:
+ if preferred_names is None:
+ preferred_names = []
+ preferred_names = set(preferred_names)
+ if discard_names is None:
+ discard_names = []
+ discard_names = set(discard_names)
+
+ shareds = _find_shared_tensors(state_dict)
+ to_remove = defaultdict(list)
+ for shared in shareds:
+ complete_names = set([name for name in shared if _is_complete(state_dict[name])])
+ if not complete_names:
+ raise RuntimeError(
+ "Error while trying to find names to remove to save state dict, but found no suitable name to keep"
+ f" for saving amongst: {shared}. None is covering the entire storage.Refusing to save/load the model"
+ " since you could be storing much more memory than needed. Please refer to"
+ " https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an"
+ " issue."
+ )
+
+ keep_name = sorted(list(complete_names))[0]
+
+ # Mechanism to preferentially select keys to keep
+ # coming from the on-disk file to allow
+ # loading models saved with a different choice
+ # of keep_name
+ preferred = complete_names.difference(discard_names)
+ if preferred:
+ keep_name = sorted(list(preferred))[0]
+
+ if preferred_names:
+ preferred = preferred_names.intersection(complete_names)
+ if preferred:
+ keep_name = sorted(list(preferred))[0]
+ for name in sorted(shared):
+ if name != keep_name:
+ to_remove[keep_name].append(name)
+ return to_remove
+
+
+def save_model(
+ model: torch.nn.Module, filename: str, metadata: Optional[Dict[str, str]] = None, force_contiguous: bool = True
+):
+ """
+ Saves a given torch model to specified filename.
+ This method exists specifically to avoid tensor sharing issues which are
+ not allowed in `safetensors`. [More information on tensor sharing](../torch_shared_tensors)
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to save on disk.
+ filename (`str`):
+ The filename location to save the file
+ metadata (`Dict[str, str]`, *optional*):
+ Extra information to save along with the file.
+ Some metadata will be added for each dropped tensors.
+ This information will not be enough to recover the entire
+ shared structure but might help understanding things
+ force_contiguous (`boolean`, *optional*, defaults to True):
+ Forcing the state_dict to be saved as contiguous tensors.
+ This has no effect on the correctness of the model, but it
+ could potentially change performance if the layout of the tensor
+ was chosen specifically for that reason.
+ """
+ state_dict = model.state_dict()
+ to_removes = _remove_duplicate_names(state_dict)
+
+ for kept_name, to_remove_group in to_removes.items():
+ for to_remove in to_remove_group:
+ if metadata is None:
+ metadata = {}
+
+ if to_remove not in metadata:
+ # Do not override user data
+ metadata[to_remove] = kept_name
+ del state_dict[to_remove]
+ if force_contiguous:
+ state_dict = {k: v.contiguous() for k, v in state_dict.items()}
+ try:
+ save_file(state_dict, filename, metadata=metadata)
+ except ValueError as e:
+ msg = str(e)
+ msg += " Or use save_model(..., force_contiguous=True), read the docs for potential caveats."
+ raise ValueError(msg)
+
+
+def load_model(model: torch.nn.Module, filename: Union[str, os.PathLike], strict=True) -> Tuple[List[str], List[str]]:
+ """
+ Loads a given filename onto a torch model.
+ This method exists specifically to avoid tensor sharing issues which are
+ not allowed in `safetensors`. [More information on tensor sharing](../torch_shared_tensors)
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to load onto.
+ filename (`str`, or `os.PathLike`):
+ The filename location to load the file from.
+ strict (`bool`, *optional*, defaults to True):
+            Whether to fail if you're missing keys or having unexpected ones
+ When false, the function simply returns missing and unexpected names.
+
+ Returns:
+ `(missing, unexpected): (List[str], List[str])`
+ `missing` are names in the model which were not modified during loading
+ `unexpected` are names that are on the file, but weren't used during
+ the load.
+ """
+ state_dict = load_file(filename)
+ model_state_dict = model.state_dict()
+ to_removes = _remove_duplicate_names(model_state_dict, preferred_names=state_dict.keys())
+ missing, unexpected = model.load_state_dict(state_dict, strict=False)
+ missing = set(missing)
+ for to_remove_group in to_removes.values():
+ for to_remove in to_remove_group:
+ if to_remove not in missing:
+ unexpected.append(to_remove)
+ else:
+ missing.remove(to_remove)
+ if strict and (missing or unexpected):
+ missing_keys = ", ".join([f'"{k}"' for k in sorted(missing)])
+ unexpected_keys = ", ".join([f'"{k}"' for k in sorted(unexpected)])
+ error = f"Error(s) in loading state_dict for {model.__class__.__name__}:"
+ if missing:
+ error += f"\n Missing key(s) in state_dict: {missing_keys}"
+ if unexpected:
+ error += f"\n Unexpected key(s) in state_dict: {unexpected_keys}"
+ raise RuntimeError(error)
+ return missing, unexpected
+
+
+def save(tensors: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensors (`Dict[str, torch.Tensor]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `bytes`: The raw bytes representing the format
+
+ Example:
+
+ ```python
+ from safetensors.torch import save
+ import torch
+
+ tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))}
+ byte_data = save(tensors)
+ ```
+ """
+ serialized = serialize(_flatten(tensors), metadata=metadata)
+ result = bytes(serialized)
+ return result
+
+
+def save_file(
+ tensors: Dict[str, torch.Tensor],
+ filename: Union[str, os.PathLike],
+ metadata: Optional[Dict[str, str]] = None,
+):
+ """
+ Saves a dictionary of tensors into raw bytes in safetensors format.
+
+ Args:
+ tensors (`Dict[str, torch.Tensor]`):
+ The incoming tensors. Tensors need to be contiguous and dense.
+        filename (`str`, or `os.PathLike`):
+ The filename we're saving into.
+ metadata (`Dict[str, str]`, *optional*, defaults to `None`):
+ Optional text only metadata you might want to save in your header.
+ For instance it can be useful to specify more about the underlying
+ tensors. This is purely informative and does not affect tensor loading.
+
+ Returns:
+ `None`
+
+ Example:
+
+ ```python
+ from safetensors.torch import save_file
+ import torch
+
+ tensors = {"embedding": torch.zeros((512, 1024)), "attention": torch.zeros((256, 256))}
+ save_file(tensors, "model.safetensors")
+ ```
+ """
+ serialize_file(_flatten(tensors), filename, metadata=metadata)
+
+
+def load_file(filename: Union[str, os.PathLike], device="cpu") -> Dict[str, torch.Tensor]:
+ """
+ Loads a safetensors file into torch format.
+
+ Args:
+ filename (`str`, or `os.PathLike`):
+ The name of the file which contains the tensors
+        device (`str`, *optional*, defaults to `"cpu"`):
+ The device where the tensors need to be located after load.
+ available options are all regular torch device locations
+
+ Returns:
+ `Dict[str, torch.Tensor]`: dictionary that contains name as key, value as `torch.Tensor`
+
+ Example:
+
+ ```python
+ from safetensors.torch import load_file
+
+ file_path = "./my_folder/bert.safetensors"
+ loaded = load_file(file_path)
+ ```
+ """
+ result = {}
+ with safe_open(filename, framework="pt", device=device) as f:
+ for k in f.keys():
+ result[k] = f.get_tensor(k)
+ return result
+
+
+def load(data: bytes) -> Dict[str, torch.Tensor]:
+ """
+ Loads a safetensors file into torch format from pure bytes.
+
+ Args:
+ data (`bytes`):
+ The content of a safetensors file
+
+ Returns:
+ `Dict[str, torch.Tensor]`: dictionary that contains name as key, value as `torch.Tensor` on cpu
+
+ Example:
+
+ ```python
+ from safetensors.torch import load
+
+ file_path = "./my_folder/bert.safetensors"
+ with open(file_path, "rb") as f:
+ data = f.read()
+
+ loaded = load(data)
+ ```
+ """
+ flat = deserialize(data)
+ return _view2torch(flat)
+
+# torch.float8 formats require 2.1; we do not support these dtypes on earlier versions
+_float8_e4m3fn = getattr(torch, "float8_e4m3fn", None)
+_float8_e5m2 = getattr(torch, "float8_e5m2", None)
+
+_SIZE = {
+ torch.int64: 8,
+ torch.float32: 4,
+ torch.int32: 4,
+ torch.bfloat16: 2,
+ torch.float16: 2,
+ torch.int16: 2,
+ torch.uint8: 1,
+ torch.int8: 1,
+ torch.bool: 1,
+ torch.float64: 8,
+ _float8_e4m3fn: 1,
+ _float8_e5m2: 1,
+}
+
+_TYPES = {
+ "F64": torch.float64,
+ "F32": torch.float32,
+ "F16": torch.float16,
+ "BF16": torch.bfloat16,
+ "I64": torch.int64,
+ # "U64": torch.uint64,
+ "I32": torch.int32,
+ # "U32": torch.uint32,
+ "I16": torch.int16,
+ # "U16": torch.uint16,
+ "I8": torch.int8,
+ "U8": torch.uint8,
+ "BOOL": torch.bool,
+ "F8_E4M3": _float8_e4m3fn,
+ "F8_E5M2": _float8_e5m2,
+}
+
+
+def _getdtype(dtype_str: str) -> torch.dtype:
+ return _TYPES[dtype_str]
+
+
+def _view2torch(safeview) -> Dict[str, torch.Tensor]:
+ result = {}
+ for k, v in safeview:
+ dtype = _getdtype(v["dtype"])
+ arr = torch.frombuffer(v["data"], dtype=dtype).reshape(v["shape"])
+ if sys.byteorder == "big":
+ arr = torch.from_numpy(arr.numpy().byteswap(inplace=False))
+ result[k] = arr
+
+ return result
+
+
+def _tobytes(tensor: torch.Tensor, name: str) -> bytes:
+ if tensor.layout != torch.strided:
+ raise ValueError(
+ f"You are trying to save a sparse tensor: `{name}` which this library does not support."
+ " You can make it a dense tensor before saving with `.to_dense()` but be aware this might"
+ " make a much larger file than needed."
+ )
+
+ if not tensor.is_contiguous():
+ raise ValueError(
+ f"You are trying to save a non contiguous tensor: `{name}` which is not allowed. It either means you"
+ " are trying to save tensors which are reference of each other in which case it's recommended to save"
+ " only the full tensors, and reslice at load time, or simply call `.contiguous()` on your tensor to"
+ " pack it before saving."
+ )
+ if tensor.device.type != "cpu":
+ # Moving tensor to cpu before saving
+ tensor = tensor.to("cpu")
+
+ import ctypes
+
+ import numpy as np
+
+ # When shape is empty (scalar), np.prod returns a float
+ # we need a int for the following calculations
+ length = int(np.prod(tensor.shape).item())
+ bytes_per_item = _SIZE[tensor.dtype]
+
+ total_bytes = length * bytes_per_item
+
+ ptr = tensor.data_ptr()
+ if ptr == 0:
+ return b""
+ newptr = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_ubyte))
+ data = np.ctypeslib.as_array(newptr, (total_bytes,)) # no internal copy
+ if sys.byteorder == "big":
+ NPDTYPES = {
+ torch.int64: np.int64,
+ torch.float32: np.float32,
+ torch.int32: np.int32,
+ # XXX: This is ok because both have the same width
+ torch.bfloat16: np.float16,
+ torch.float16: np.float16,
+ torch.int16: np.int16,
+ torch.uint8: np.uint8,
+ torch.int8: np.int8,
+ torch.bool: bool,
+ torch.float64: np.float64,
+ # XXX: This is ok because both have the same width and byteswap is a no-op anyway
+ _float8_e4m3fn: np.uint8,
+ _float8_e5m2: np.uint8,
+ }
+ npdtype = NPDTYPES[tensor.dtype]
+ # Not in place as that would potentially modify a live running model
+ data = data.view(npdtype).byteswap(inplace=False)
+ return data.tobytes()
+
+
+def _flatten(tensors: Dict[str, torch.Tensor]) -> Dict[str, Dict[str, Any]]:
+ if not isinstance(tensors, dict):
+ raise ValueError(f"Expected a dict of [str, torch.Tensor] but received {type(tensors)}")
+
+ invalid_tensors = []
+ for k, v in tensors.items():
+ if not isinstance(v, torch.Tensor):
+ raise ValueError(f"Key `{k}` is invalid, expected torch.Tensor but received {type(v)}")
+
+ if v.layout != torch.strided:
+ invalid_tensors.append(k)
+ if invalid_tensors:
+ raise ValueError(
+ f"You are trying to save a sparse tensors: `{invalid_tensors}` which this library does not support."
+ " You can make it a dense tensor before saving with `.to_dense()` but be aware this might"
+ " make a much larger file than needed."
+ )
+
+ shared_pointers = _find_shared_tensors(tensors)
+ failing = []
+ for names in shared_pointers:
+ if len(names) > 1:
+ failing.append(names)
+
+ if failing:
+ raise RuntimeError(
+ f"""
+ Some tensors share memory, this will lead to duplicate memory on disk and potential differences when loading them again: {failing}.
+ A potential way to correctly save your model is to use `save_model`.
+ More information at https://huggingface.co/docs/safetensors/torch_shared_tensors
+ """
+ )
+
+ return {
+ k: {
+ "dtype": str(v.dtype).split(".")[-1],
+ "shape": v.shape,
+ "data": _tobytes(v, k),
+ }
+ for k, v in tensors.items()
+ }
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tensorboard_plugin_wit/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tensorboard_plugin_wit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tensorboard_plugin_wit/wit_plugin.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tensorboard_plugin_wit/wit_plugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8991f9d7f55dc9b15f32cb33966a35cd84236c4
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tensorboard_plugin_wit/wit_plugin.py
@@ -0,0 +1,541 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""The plugin serving the interactive inference tab."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import io
+import json
+import logging
+import math
+import numpy as np
+import os
+import werkzeug
+from werkzeug import wrappers
+from six.moves import xrange # pylint: disable=redefined-builtin
+
+from google.protobuf import json_format
+from grpc.framework.interfaces.face.face import AbortionError
+from werkzeug import wrappers
+
+import tensorflow as tf
+
+from tensorboard.backend import http_util
+from tensorboard.plugins import base_plugin
+from tensorboard_plugin_wit._utils import common_utils
+from tensorboard_plugin_wit._utils import inference_utils
+from tensorboard_plugin_wit._utils import platform_utils
+
+logger = logging.getLogger('tensorboard')
+
+
+# Max number of examples to scan along the `examples_path` in order to return
+# statistics and sampling for features.
+NUM_EXAMPLES_TO_SCAN = 50
+
+# Max number of mutants to show per feature (i.e. num of points along x-axis).
+NUM_MUTANTS = 10
+
+# Max number of examples to send in a single response.
+# TODO(jameswex): Make dynamic based on example size.
+MAX_EXAMPLES_TO_SEND = 10000
+
+class WhatIfToolPlugin(base_plugin.TBPlugin):
+  """Plugin for understanding/debugging model inference.
+  """
+
+  # This string field is used by TensorBoard to generate the paths for routes
+  # provided by this plugin. It must thus be URL-friendly. This field is also
+  # used to uniquely identify this plugin throughout TensorBoard. See BasePlugin
+  # for details.
+  plugin_name = 'whatif'
+  # NOTE(review): the attributes below are mutable class-level state (loaded
+  # examples, pending-update indices, current sprite) shared by every instance
+  # of this class -- presumably safe because TensorBoard creates a single
+  # plugin instance per process; confirm before instantiating more than one.
+  examples = []
+  updated_example_indices = set()
+  sprite = None
+  # Proto class used to parse examples; replaced with tf.train.SequenceExample
+  # when the frontend requests sequence examples (see _examples_from_path_handler).
+  example_class = tf.train.Example
+
+  # The standard name for encoded image features in TensorFlow.
+  image_feature_name = 'image/encoded'
+
+  # The width and height of the thumbnail for any images for Facets Dive.
+  sprite_thumbnail_dim_px = 32
+
+  # The vocab of inference class indices to label names for the model.
+  label_vocab = []
+
+  def __init__(self, context):
+    """Constructs an interactive inference plugin for TensorBoard.
+
+    Args:
+      context: A base_plugin.TBContext instance.
+    """
+    self._logdir = context.logdir
+    # Optional extra directory the plugin is allowed to read example files from.
+    self._wit_data_dir = context.flags.wit_data_dir if context.flags else None
+
+    self.custom_predict_fn = None
+    if context.flags and context.flags.custom_predict_fn:
+      try:
+        # Load the user-supplied module from an arbitrary file path and pull
+        # out its `custom_predict_fn` attribute.
+        import importlib.util as iu
+        spec = iu.spec_from_file_location("custom_predict_fn", context.flags.custom_predict_fn)
+        module = iu.module_from_spec(spec)
+        spec.loader.exec_module(module)
+        self.custom_predict_fn = module.custom_predict_fn
+        logger.info("custom_predict_fn loaded.")
+
+      except Exception as e:
+        # Deliberately broad: a bad user module should not break TensorBoard
+        # startup, so log and continue without a custom predict function.
+        logger.error(str(e))
+        logger.error("Failed to load the custom predict function.")
+        logger.error("Have you defined a function named custom_predict_fn?")
+
+  def get_plugin_apps(self):
+    """Obtains a mapping between routes and handlers.
+
+    Returns:
+      A mapping between routes and handlers (functions that respond to
+      requests).
+    """
+    # Routes are resolved relative to this plugin's URL namespace by
+    # TensorBoard's plugin framework.
+    return {
+        '/index.js': self._serve_js,
+        '/wit_tb_bin.html': self._serve_wit,
+        '/wit_tb_bin.js': self._serve_wit_js,
+        '/infer': self._infer,
+        '/update_example': self._update_example,
+        '/examples_from_path': self._examples_from_path_handler,
+        '/sprite': self._serve_sprite,
+        '/duplicate_example': self._duplicate_example,
+        '/delete_example': self._delete_example,
+        '/infer_mutants': self._infer_mutants_handler,
+        '/eligible_features': self._eligible_features_from_example_handler,
+        '/sort_eligible_features': self._sort_eligible_features_handler,
+    }
+
+  def is_active(self):
+    """Determines whether this plugin is active.
+
+    Returns:
+      A boolean. Whether this plugin is active.
+    """
+    # TODO(jameswex): Maybe enable if config flags were specified?
+    # Always reports inactive; presumably TensorBoard still surfaces the tab
+    # via frontend_metadata() -- confirm against the TensorBoard version in use.
+    return False
+
+  def frontend_metadata(self):
+    """Returns metadata TensorBoard uses to render this plugin's frontend tab."""
+    return base_plugin.FrontendMetadata(
+        es_module_path="/index.js",
+        tab_name='What-If Tool')
+
+  @wrappers.Request.application
+  def _serve_js(self, request):
+    """Serves the plugin's frontend entry-point JavaScript bundle."""
+    del request  # unused
+    filepath = os.path.join(os.path.dirname(__file__), "static", "index.js")
+    with io.open(filepath, encoding='utf-8') as infile:
+      contents = infile.read()
+    return werkzeug.Response(contents, content_type="application/javascript")
+
+  @wrappers.Request.application
+  def _serve_wit(self, request):
+    """Serves the What-If Tool HTML shell from the static assets directory."""
+    del request  # unused
+    filepath = os.path.join(os.path.dirname(__file__), "static", "wit_tb_bin.html")
+    with io.open(filepath, encoding='utf-8') as infile:
+      contents = infile.read()
+    return werkzeug.Response(contents, content_type="text/html")
+
+  @wrappers.Request.application
+  def _serve_wit_js(self, request):
+    """Serves the compiled What-If Tool JavaScript bundle."""
+    del request  # unused
+    filepath = os.path.join(os.path.dirname(__file__), "static", "wit_tb_bin.js")
+    with io.open(filepath, encoding='utf-8') as infile:
+      contents = infile.read()
+    return werkzeug.Response(contents, content_type="application/javascript")
+ def generate_sprite(self, example_strings):
+ # Generate a sprite image for the examples if the examples contain the
+ # standard encoded image feature.
+ feature_list = (self.examples[0].features.feature
+ if self.example_class == tf.train.Example
+ else self.examples[0].context.feature)
+ self.sprite = (
+ inference_utils.create_sprite_image(example_strings)
+ if (len(self.examples) and self.image_feature_name in feature_list) else
+ None)
+
+  @wrappers.Request.application
+  def _examples_from_path_handler(self, request):
+    """Returns JSON of the specified examples.
+
+    Args:
+      request: A request that should contain 'examples_path' and 'max_examples'.
+
+    Returns:
+      JSON of up to max_examples of the examples in the path.
+    """
+    start_example = (int(request.args.get('start_example'))
+                     if request.args.get('start_example') else 0)
+    # Paging: only the first request (start_example == 0) loads data from
+    # disk; subsequent pages slice the cached self.examples.
+    if not start_example:
+      examples_count = int(request.args.get('max_examples'))
+      examples_path = request.args.get('examples_path')
+      sampling_odds = float(request.args.get('sampling_odds'))
+      self.example_class = (tf.train.SequenceExample
+                            if request.args.get('sequence_examples') == 'true'
+                            else tf.train.Example)
+      try:
+        # Reject paths outside the directories the plugin may read from.
+        platform_utils.throw_if_file_access_not_allowed(examples_path,
+                                                        self._logdir,
+                                                        self._wit_data_dir)
+        example_strings = platform_utils.example_protos_from_path(
+            examples_path, examples_count, parse_examples=False,
+            sampling_odds=sampling_odds, example_class=self.example_class)
+        self.examples = [
+            self.example_class.FromString(ex) for ex in example_strings]
+        self.generate_sprite(example_strings)
+        # All freshly loaded examples need inference on the next /infer call.
+        self.updated_example_indices = set(range(len(self.examples)))
+      except common_utils.InvalidUserInputError as e:
+        logger.error('Data loading error: %s', e.message)
+        return http_util.Respond(request, e.message,
+                                 'application/json', code=400)
+      except Exception as e:
+        # Broad catch so malformed paths/protos surface to the frontend as a
+        # 400 with the error text rather than crashing the handler.
+        return http_util.Respond(request, str(e),
+                                 'application/json', code=400)
+
+    # Split examples from start_example to + max_examples
+    # Send next start_example if necessary
+    end_example = start_example + MAX_EXAMPLES_TO_SEND
+    json_examples = [
+        json_format.MessageToJson(example) for example in self.examples[
+            start_example:end_example]
+    ]
+    # 'next' == -1 signals the frontend that this was the final page.
+    if end_example >= len(self.examples):
+      end_example = -1
+    return http_util.Respond(
+        request,
+        {'examples': json_examples,
+         'sprite': True if (self.sprite and not start_example) else False,
+         'next': end_example},
+        'application/json')
+
+  @wrappers.Request.application
+  def _serve_sprite(self, request):
+    """Serves the most recently generated sprite image as a PNG."""
+    return http_util.Respond(request, self.sprite, 'image/png')
+
+  @wrappers.Request.application
+  def _update_example(self, request):
+    """Updates the specified example.
+
+    Args:
+      request: A request that should contain 'index' and 'example'.
+
+    Returns:
+      An empty response.
+    """
+    if request.method != 'POST':
+      return http_util.Respond(request, 'invalid non-POST request',
+                               'application/json', code=405)
+    example_json = request.form['example']
+    index = int(request.form['index'])
+    # NOTE(review): only the upper bound is validated; a negative index would
+    # write through Python's negative indexing -- presumably the frontend
+    # never sends one; confirm.
+    if index >= len(self.examples):
+      return http_util.Respond(request, 'invalid index provided',
+                               'application/json', code=400)
+    new_example = self.example_class()
+    json_format.Parse(example_json, new_example)
+    self.examples[index] = new_example
+    # Mark the example dirty so the next /infer call re-runs inference on it.
+    self.updated_example_indices.add(index)
+    self.generate_sprite([ex.SerializeToString() for ex in self.examples])
+    return http_util.Respond(request, {}, 'application/json')
+
+  @wrappers.Request.application
+  def _duplicate_example(self, request):
+    """Duplicates the specified example.
+
+    Args:
+      request: A request that should contain 'index'.
+
+    Returns:
+      An empty response.
+    """
+    index = int(request.args.get('index'))
+    if index >= len(self.examples):
+      return http_util.Respond(request, 'invalid index provided',
+                               'application/json', code=400)
+    # Deep-copy via the proto API so later edits to the copy do not alias the
+    # original example.
+    new_example = self.example_class()
+    new_example.CopyFrom(self.examples[index])
+    self.examples.append(new_example)
+    # The appended copy has never been inferred; mark it dirty.
+    self.updated_example_indices.add(len(self.examples) - 1)
+    self.generate_sprite([ex.SerializeToString() for ex in self.examples])
+    return http_util.Respond(request, {}, 'application/json')
+
+ @wrappers.Request.application
+ def _delete_example(self, request):
+ """Deletes the specified example.
+
+ Args:
+ request: A request that should contain 'index'.
+
+ Returns:
+ An empty response.
+ """
+ index = int(request.args.get('index'))
+ if index >= len(self.examples):
+ return http_util.Respond(request, 'invalid index provided',
+ 'application/json', code=400)
+ del self.examples[index]
+ self.updated_example_indices = set([
+ i if i < index else i - 1 for i in self.updated_example_indices])
+ self.generate_sprite([ex.SerializeToString() for ex in self.examples])
+ return http_util.Respond(request, {}, 'application/json')
+
+  def _parse_request_arguments(self, request):
+    """Parses comma separated request arguments
+
+    Args:
+      request: A request that should contain 'inference_address', 'model_name',
+      'model_version', 'model_signature'.
+
+    Returns:
+      A tuple of lists for model parameters
+    """
+    inference_addresses = request.args.get('inference_address').split(',')
+    model_names = request.args.get('model_name').split(',')
+    model_versions = request.args.get('model_version').split(',')
+    model_signatures = request.args.get('model_signature').split(',')
+    # Only names vs. addresses are length-checked; versions and signatures are
+    # assumed to line up with them (not validated here).
+    if len(model_names) != len(inference_addresses):
+      raise common_utils.InvalidUserInputError('Every model should have a ' +
+                                               'name and address.')
+    return inference_addresses, model_names, model_versions, model_signatures
+
+ @wrappers.Request.application
+ def _infer(self, request):
+ """Returns JSON for the `vz-line-chart`s for a feature.
+
+ Args:
+ request: A request that should contain 'inference_address', 'model_name',
+ 'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.
+
+ Returns:
+ A list of JSON objects, one for each chart.
+ """
+ start_example = (int(request.args.get('start_example'))
+ if request.args.get('start_example') else 0)
+ if not start_example:
+ label_vocab = inference_utils.get_label_vocab(
+ request.args.get('label_vocab_path'))
+ try:
+ if request.method != 'GET':
+ logger.error('%s requests are forbidden.', request.method)
+ return http_util.Respond(request, 'invalid non-GET request',
+ 'application/json', code=405)
+
+ (inference_addresses, model_names, model_versions,
+ model_signatures) = self._parse_request_arguments(request)
+
+ self.indices_to_infer = sorted(self.updated_example_indices)
+ examples_to_infer = [self.examples[index] for index in self.indices_to_infer]
+ self.infer_objs = []
+ self.extra_outputs = []
+ for model_num in xrange(len(inference_addresses)):
+ serving_bundle = inference_utils.ServingBundle(
+ inference_addresses[model_num],
+ model_names[model_num],
+ request.args.get('model_type'),
+ model_versions[model_num],
+ model_signatures[model_num],
+ request.args.get('use_predict') == 'true',
+ request.args.get('predict_input_tensor'),
+ request.args.get('predict_output_tensor'),
+ custom_predict_fn=self.custom_predict_fn)
+ (predictions, extra_output) = inference_utils.run_inference_for_inference_results(
+ examples_to_infer, serving_bundle)
+ self.infer_objs.append(predictions)
+ self.extra_outputs.append(extra_output)
+ self.updated_example_indices = set()
+ except AbortionError as e:
+ logging.error(str(e))
+ return http_util.Respond(request, e.details,
+ 'application/json', code=400)
+ except Exception as e:
+ logging.error(str(e))
+ return http_util.Respond(request, str(e),
+ 'application/json', code=400)
+
+ # Split results from start_example to + max_examples
+ # Send next start_example if necessary
+ end_example = start_example + MAX_EXAMPLES_TO_SEND
+
+ def get_inferences_resp():
+ sliced_infer_objs = [
+ copy.deepcopy(infer_obj) for infer_obj in self.infer_objs]
+ if request.args.get('model_type') == 'classification':
+ for obj in sliced_infer_objs:
+ obj['classificationResult']['classifications'][:] = obj[
+ 'classificationResult']['classifications'][
+ start_example:end_example]
+ else:
+ for obj in sliced_infer_objs:
+ obj['regressionResult']['regressions'][:] = obj['regressionResult'][
+ 'regressions'][start_example:end_example]
+ return {'indices': self.indices_to_infer[start_example:end_example],
+ 'results': sliced_infer_objs}
+
+ def get_extra_outputs_resp():
+ sliced_extra_objs = [
+ copy.deepcopy(infer_obj) for infer_obj in self.extra_outputs]
+ for obj in sliced_extra_objs:
+ if obj is not None:
+ for key in obj:
+ obj[key][:] = obj[key][start_example:end_example]
+ return sliced_extra_objs
+
+ try:
+ inferences_resp = get_inferences_resp()
+ extra_outputs_resp = get_extra_outputs_resp()
+ resp = {'inferences': json.dumps(inferences_resp),
+ 'extraOutputs': json.dumps(extra_outputs_resp)}
+ if end_example >= len(self.examples):
+ end_example = -1
+ if start_example == 0:
+ resp['vocab'] = json.dumps(label_vocab)
+ resp['next'] = end_example
+ return http_util.Respond(request, resp, 'application/json')
+ except Exception as e:
+ logging.error(e)
+ return http_util.Respond(request, str(e),
+ 'application/json', code=400)
+
+  @wrappers.Request.application
+  def _eligible_features_from_example_handler(self, request):
+    """Returns a list of JSON objects for each feature in the example.
+
+    Args:
+      request: A request for features.
+
+    Returns:
+      A list with a JSON object for each feature.
+      Numeric features are represented as {name: observedMin: observedMax:}.
+      Categorical features are represented as {name: samples:[]}.
+    """
+    # Statistics are computed over at most the first NUM_EXAMPLES_TO_SCAN
+    # loaded examples to bound latency.
+    features_list = inference_utils.get_eligible_features(
+        self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
+    return http_util.Respond(request, features_list, 'application/json')
+
+ @wrappers.Request.application
+ def _sort_eligible_features_handler(self, request):
+ """Returns a sorted list of JSON objects for each feature in the example.
+
+ The list is sorted by interestingness in terms of the resulting change in
+ inference values across feature values, for partial dependence plots.
+
+ Args:
+ request: A request for sorted features.
+
+ Returns:
+ A sorted list with a JSON object for each feature.
+ Numeric features are represented as
+ {name: observedMin: observedMax: interestingness:}.
+ Categorical features are represented as
+ {name: samples:[] interestingness:}.
+ """
+ try:
+ features_list = inference_utils.get_eligible_features(
+ self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
+ example_index = int(request.args.get('example_index', '0'))
+ (inference_addresses, model_names, model_versions,
+ model_signatures) = self._parse_request_arguments(request)
+ chart_data = {}
+ for feat in features_list:
+ chart_data[feat['name']] = self._infer_mutants_impl(
+ feat['name'], example_index,
+ inference_addresses, model_names, request.args.get('model_type'),
+ model_versions, model_signatures,
+ request.args.get('use_predict') == 'true',
+ request.args.get('predict_input_tensor'),
+ request.args.get('predict_output_tensor'),
+ feat['observedMin'] if 'observedMin' in feat else 0,
+ feat['observedMax'] if 'observedMin' in feat else 0,
+ None, custom_predict_fn=self.custom_predict_fn)
+ features_list = inference_utils.sort_eligible_features(
+ features_list, chart_data)
+ return http_util.Respond(request, features_list, 'application/json')
+ except common_utils.InvalidUserInputError as e:
+ return http_util.Respond(request, e.message,
+ 'application/json', code=400)
+ except Exception as e:
+ return http_util.Respond(request, str(e),
+ 'application/json', code=400)
+
+  @wrappers.Request.application
+  def _infer_mutants_handler(self, request):
+    """Returns JSON for the partial dependence plots for a feature.
+
+    Args:
+      request: A request that should contain 'feature_name', 'example_index',
+      'inference_address', 'model_name', 'model_type', 'model_version', and
+      'model_signature'.
+
+    Returns:
+      A list of JSON objects, one for each chart.
+    """
+    try:
+      if request.method != 'GET':
+        logger.error('%s requests are forbidden.', request.method)
+        return http_util.Respond(request, 'invalid non-GET request',
+                                 'application/json', code=405)
+
+      example_index = int(request.args.get('example_index', '0'))
+      feature_name = request.args.get('feature_name')
+      (inference_addresses, model_names, model_versions,
+       model_signatures) = self._parse_request_arguments(request)
+      # Delegates the actual mutant generation and inference to the shared
+      # implementation also used by _sort_eligible_features_handler.
+      json_mapping = self._infer_mutants_impl(feature_name, example_index,
+          inference_addresses, model_names, request.args.get('model_type'),
+          model_versions, model_signatures,
+          request.args.get('use_predict') == 'true',
+          request.args.get('predict_input_tensor'),
+          request.args.get('predict_output_tensor'),
+          request.args.get('x_min'), request.args.get('x_max'),
+          request.args.get('feature_index_pattern'),
+          custom_predict_fn=self.custom_predict_fn)
+      return http_util.Respond(request, json_mapping, 'application/json')
+    except common_utils.InvalidUserInputError as e:
+      return http_util.Respond(request, e.message,
+                               'application/json', code=400)
+    except Exception as e:
+      return http_util.Respond(request, str(e),
+                               'application/json', code=400)
+
+  def _infer_mutants_impl(self, feature_name, example_index, inference_addresses,
+      model_names, model_type, model_versions, model_signatures, use_predict,
+      predict_input_tensor, predict_output_tensor, x_min, x_max,
+      feature_index_pattern, custom_predict_fn):
+    """Helper for generating PD plots for a feature.
+
+    Builds one ServingBundle per model and charts how inference results change
+    as `feature_name` is mutated across its value range.
+    """
+    # example_index == -1 means "use every loaded example"; otherwise a single
+    # example is mutated.
+    examples = (self.examples if example_index == -1
+                else [self.examples[example_index]])
+    serving_bundles = []
+    for model_num in xrange(len(inference_addresses)):
+      serving_bundles.append(inference_utils.ServingBundle(
+          inference_addresses[model_num],
+          model_names[model_num],
+          model_type,
+          model_versions[model_num],
+          model_signatures[model_num],
+          use_predict,
+          predict_input_tensor,
+          predict_output_tensor,
+          custom_predict_fn=custom_predict_fn))
+
+    # VizParams derives the x-axis range/sampling from the scanned examples
+    # when x_min/x_max are not supplied.
+    viz_params = inference_utils.VizParams(
+        x_min, x_max,
+        self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
+        feature_index_pattern)
+    return inference_utils.mutant_charts_for_feature(
+        examples, feature_name, serving_bundles, viz_params)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be600a40d87d265afa0090fcc17190d0446bbb7d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/__init__.py
@@ -0,0 +1,35 @@
+from torchaudio import ( # noqa: F401
+ _extension,
+ compliance,
+ datasets,
+ functional,
+ io,
+ kaldi_io,
+ models,
+ pipelines,
+ sox_effects,
+ transforms,
+ utils,
+)
+from torchaudio.backend import get_audio_backend, list_audio_backends, set_audio_backend
+
+try:
+    # version.py is generated at build time, so it can be absent (e.g. in a
+    # plain source checkout); in that case __version__ is simply undefined.
+    from .version import __version__, git_version  # noqa: F401
+except ImportError:
+    pass
+
+# Public API re-exported at the package root.
+__all__ = [
+    "io",
+    "compliance",
+    "datasets",
+    "functional",
+    "models",
+    "pipelines",
+    "kaldi_io",
+    "utils",
+    "sox_effects",
+    "transforms",
+    "list_audio_backends",
+    "get_audio_backend",
+    "set_audio_backend",
+]
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_extension.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..97763abfae818656c9d21253ddfe46c8d1d5e9d8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_extension.py
@@ -0,0 +1,103 @@
+import os
+import warnings
+from pathlib import Path
+
+import torch
+from torchaudio._internal import module_utils as _mod_utils # noqa: F401
+
+_LIB_DIR = Path(__file__).parent / "lib"
+
+
+def _get_lib_path(lib: str) -> Path:
+    """Returns the expected path of the named extension library.
+
+    Windows native extensions use the ``pyd`` suffix; all other platforms
+    use ``so``.
+    """
+    suffix = "pyd" if os.name == "nt" else "so"
+    path = _LIB_DIR / f"{lib}.{suffix}"
+    return path
+
+
+def _load_lib(lib: str) -> bool:
+    """Load extension module
+
+    Note:
+        In case `torchaudio` is deployed with `pex` format, the library file
+        is not in a standard location.
+        In this case, we expect that `libtorchaudio` is available somewhere
+        in the search path of dynamic loading mechanism, so that importing
+        `_torchaudio` will have library loader find and load `libtorchaudio`.
+        This is the reason why the function should not raise an error when the
+        library file is not found.
+
+    Returns:
+        bool:
+            True if the library file is found AND the library loaded without failure.
+            False if the library file is not found (like in the case where torchaudio
+            is deployed with pex format, thus the shared library file is
+            in a non-standard location.).
+            If the library file is found but there is an issue loading the library,
+            (such as missing dependency) then this function raises the exception as-is.
+
+    Raises:
+        Exception:
+            If the library file is found, but there is an issue loading the library file,
+            (when the underlying `ctypes.CDLL` throws an exception), this function will pass
+            the exception as-is, instead of catching it and returning bool.
+            The expected case is `OSError` thrown by `ctypes.CDLL` when a dynamic dependency
+            is not found.
+            This behavior was chosen because the expected failure case is not recoverable.
+            If a dependency is missing, then users have to install it.
+    """
+    path = _get_lib_path(lib)
+    if not path.exists():
+        return False
+    # Register both the TorchScript ops and the custom classes that the
+    # shared library provides.
+    torch.ops.load_library(path)
+    torch.classes.load_library(path)
+    return True
+
+
+_FFMPEG_INITIALIZED = False
+
+
+def _init_ffmpeg():
+    """Initializes the FFmpeg integration (idempotent, process-wide).
+
+    Raises:
+        RuntimeError: torchaudio was built without FFmpeg support.
+        ImportError: FFmpeg shared libraries are missing at runtime.
+    """
+    global _FFMPEG_INITIALIZED
+    if _FFMPEG_INITIALIZED:
+        return
+
+    if not torch.ops.torchaudio.is_ffmpeg_available():
+        raise RuntimeError(
+            "torchaudio is not compiled with FFmpeg integration. Please set USE_FFMPEG=1 when compiling torchaudio."
+        )
+
+    try:
+        _load_lib("libtorchaudio_ffmpeg")
+    except OSError as err:
+        raise ImportError("FFmpeg libraries are not found. Please install FFmpeg.") from err
+
+    # Importing registers the PyBind11 bindings; must happen after the
+    # shared library above is loaded.
+    import torchaudio._torchaudio_ffmpeg  # noqa
+
+    torch.ops.torchaudio.ffmpeg_init()
+    # Cap FFmpeg log verbosity. NOTE(review): 8 presumably corresponds to
+    # FFmpeg's AV_LOG_FATAL level -- confirm against the av_log constants.
+    if torch.ops.torchaudio.ffmpeg_get_log_level() > 8:
+        torch.ops.torchaudio.ffmpeg_set_log_level(8)
+
+    _FFMPEG_INITIALIZED = True
+
+
+def _init_extension():
+    """Loads the torchaudio C++ extension, warning (not failing) when absent."""
+    if not _mod_utils.is_module_available("torchaudio._torchaudio"):
+        warnings.warn("torchaudio C++ extension is not available.")
+        return
+
+    _load_lib("libtorchaudio")
+    # This import is for initializing the methods registered via PyBind11.
+    # This has to happen after the base library is loaded.
+    from torchaudio import _torchaudio  # noqa
+
+    # Because this part is executed as part of `import torchaudio`, we ignore the
+    # initialization failure.
+    # If the FFmpeg integration is not properly initialized, then a detailed error
+    # will be raised when client code attempts to import the dedicated feature.
+    try:
+        _init_ffmpeg()
+    except Exception:
+        pass
+
+
+_init_extension()
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/kaldi_io.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/kaldi_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cc69f964923516414da6f97a929002e65b73384
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/kaldi_io.py
@@ -0,0 +1,130 @@
+# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python)
+# needs to be installed. This is a light wrapper around kaldi_io that returns
+# torch.Tensors.
+from typing import Any, Callable, Iterable, Tuple
+
+import torch
+from torch import Tensor
+from torchaudio._internal import module_utils as _mod_utils
+
+if _mod_utils.is_module_available("kaldi_io", "numpy"):
+ import kaldi_io
+ import numpy as np
+
+
+__all__ = [
+ "read_vec_int_ark",
+ "read_vec_flt_scp",
+ "read_vec_flt_ark",
+ "read_mat_scp",
+ "read_mat_ark",
+]
+
+
+def _convert_method_output_to_tensor(
+    file_or_fd: Any, fn: Callable, convert_contiguous: bool = False
+) -> Iterable[Tuple[str, Tensor]]:
+    r"""Takes a method and invokes it; each yielded array is converted to a tensor.
+
+    Args:
+        file_or_fd (str/FileDescriptor): File name or file descriptor
+        fn (Callable): Function that has the signature (file name/descriptor) and converts it to
+            Iterable[Tuple[str, Tensor]].
+        convert_contiguous (bool, optional): Determines whether the array should be converted into a
+            contiguous layout. (Default: ``False``)
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat
+    """
+    for key, np_arr in fn(file_or_fd):
+        if convert_contiguous:
+            # Copy into a contiguous buffer so torch.from_numpy accepts it.
+            np_arr = np.ascontiguousarray(np_arr)
+        yield key, torch.from_numpy(np_arr)
+
+
+@_mod_utils.requires_module("kaldi_io", "numpy")
+def read_vec_int_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
+    r"""Create generator of (key,vector) tuples, which reads from the ark file/stream.
+
+    Args:
+        file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
+
+    Example
+        >>> # read ark to a 'dictionary'
+        >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) }
+    """
+    # Requires convert_contiguous to be True because elements from int32 vector are
+    # stored in tuples: (sizeof(int32), value) so strides are (5,) instead of (4,) which will throw an error
+    # in from_numpy as it expects strides to be a multiple of 4 (int32).
+    return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_int_ark, convert_contiguous=True)
+
+
+@_mod_utils.requires_module("kaldi_io", "numpy")
+def read_vec_flt_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
+    r"""Create generator of (key,vector) tuples, read according to Kaldi scp.
+
+    Args:
+        file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
+
+    Example
+        >>> # read scp to a 'dictionary'
+        >>> # d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) }
+    """
+    # Float vectors need no contiguity fix (see read_vec_int_ark for the int case).
+    return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_scp)
+
+
+@_mod_utils.requires_module("kaldi_io", "numpy")
+def read_vec_flt_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
+    r"""Create generator of (key,vector) tuples, which reads from the ark file/stream.
+
+    Args:
+        file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
+
+    Example
+        >>> # read ark to a 'dictionary'
+        >>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) }
+    """
+    # Float vectors need no contiguity fix (see read_vec_int_ark for the int case).
+    return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_ark)
+
+
+@_mod_utils.requires_module("kaldi_io", "numpy")
+def read_mat_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
+    r"""Create generator of (key,matrix) tuples, read according to Kaldi scp.
+
+    Args:
+        file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
+
+    Example
+        >>> # read scp to a 'dictionary'
+        >>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) }
+    """
+    # Thin wrapper over kaldi_io; matrices are already contiguous.
+    return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_scp)
+
+
+@_mod_utils.requires_module("kaldi_io", "numpy")
+def read_mat_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
+    r"""Create generator of (key,matrix) tuples, which reads from the ark file/stream.
+
+    Args:
+        file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
+
+    Returns:
+        Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
+
+    Example
+        >>> # read ark to a 'dictionary'
+        >>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) }
+    """
+    # Thin wrapper over kaldi_io; matrices are already contiguous.
+    return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_ark)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/version.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..134df65c950e8e538dc90bea2e136f2bf590738d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/version.py
@@ -0,0 +1,2 @@
+__version__ = '0.12.1+cu113'
+git_version = '58da31733e08438f9d1816f55f54756e53872a92'
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/LICENSE b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..1edcf92c3317b90fedd187e2eaad101bd1c1efc5
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) Soumith Chintala 2016,
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7e3c53b6450328d47b15aaaa95b2e1a4927dfa72
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/METADATA
@@ -0,0 +1,212 @@
+Metadata-Version: 2.1
+Name: torchvision
+Version: 0.13.1+cu113
+Summary: image and video datasets and models for torch deep learning
+Home-page: https://github.com/pytorch/vision
+Author: PyTorch Core Team
+Author-email: soumith@pytorch.org
+License: BSD
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: typing-extensions
+Requires-Dist: numpy
+Requires-Dist: requests
+Requires-Dist: torch (==1.12.1)
+Requires-Dist: pillow (!=8.3.*,>=5.3.0)
+Provides-Extra: scipy
+Requires-Dist: scipy ; extra == 'scipy'
+
+torchvision
+===========
+
+.. image:: https://pepy.tech/badge/torchvision
+ :target: https://pepy.tech/project/torchvision
+
+.. image:: https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v
+ :target: https://pytorch.org/vision/stable/index.html
+
+
+The torchvision package consists of popular datasets, model architectures, and common image transformations for computer vision.
+
+
+Installation
+============
+
+We recommend Anaconda as a Python package management system. Please refer to `pytorch.org <https://pytorch.org/>`_
+for the detail of PyTorch (``torch``) installation. The following is the corresponding ``torchvision`` versions and
+supported Python versions.
+
++--------------------------+--------------------------+---------------------------------+
+| ``torch`` | ``torchvision`` | ``python`` |
++==========================+==========================+=================================+
+| ``main`` / ``nightly`` | ``main`` / ``nightly`` | ``>=3.7``, ``<=3.10`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.11.0`` | ``0.12.0`` | ``>=3.7``, ``<=3.10`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.10.2`` | ``0.11.3`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.10.1`` | ``0.11.2`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.10.0`` | ``0.11.1`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.9.1`` | ``0.10.1`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.9.0`` | ``0.10.0`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.8.2`` | ``0.9.2`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.8.1`` | ``0.9.1`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.8.0`` | ``0.9.0`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.7.1`` | ``0.8.2`` | ``>=3.6``, ``<=3.9`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.7.0`` | ``0.8.1`` | ``>=3.6``, ``<=3.8`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.7.0`` | ``0.8.0`` | ``>=3.6``, ``<=3.8`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.6.0`` | ``0.7.0`` | ``>=3.6``, ``<=3.8`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.5.1`` | ``0.6.1`` | ``>=3.5``, ``<=3.8`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.5.0`` | ``0.6.0`` | ``>=3.5``, ``<=3.8`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.4.0`` | ``0.5.0`` | ``==2.7``, ``>=3.5``, ``<=3.8`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.3.1`` | ``0.4.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.3.0`` | ``0.4.1`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.2.0`` | ``0.4.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
++--------------------------+--------------------------+---------------------------------+
+| ``1.1.0`` | ``0.3.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
++--------------------------+--------------------------+---------------------------------+
+| ``<=1.0.1`` | ``0.2.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` |
++--------------------------+--------------------------+---------------------------------+
+
+Anaconda:
+
+.. code:: bash
+
+ conda install torchvision -c pytorch
+
+pip:
+
+.. code:: bash
+
+ pip install torchvision
+
+From source:
+
+.. code:: bash
+
+ python setup.py install
+ # or, for OSX
+ # MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py install
+
+
+In case building TorchVision from source fails, install the nightly version of PyTorch following
+the linked guide on the `contributing page <https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md>`_ and retry the install.
+
+By default, GPU support is built if CUDA is found and ``torch.cuda.is_available()`` is true.
+It's possible to force building GPU support by setting ``FORCE_CUDA=1`` environment variable,
+which is useful when building a docker image.
+
+Image Backend
+=============
+Torchvision currently supports the following image backends:
+
+* `Pillow`_ (default)
+
+* `Pillow-SIMD`_ - a **much faster** drop-in replacement for Pillow with SIMD. If installed will be used as the default.
+
+* `accimage`_ - if installed can be activated by calling :code:`torchvision.set_image_backend('accimage')`
+
+* `libpng`_ - can be installed via conda :code:`conda install libpng` or any of the package managers for debian-based and RHEL-based Linux distributions.
+
+* `libjpeg`_ - can be installed via conda :code:`conda install jpeg` or any of the package managers for debian-based and RHEL-based Linux distributions. `libjpeg-turbo`_ can be used as well.
+
+**Notes:** ``libpng`` and ``libjpeg`` must be available at compilation time in order to be available. Make sure that it is available on the standard library locations,
+otherwise, add the include and library paths in the environment variables ``TORCHVISION_INCLUDE`` and ``TORCHVISION_LIBRARY``, respectively.
+
+.. _libpng : http://www.libpng.org/pub/png/libpng.html
+.. _Pillow : https://python-pillow.org/
+.. _Pillow-SIMD : https://github.com/uploadcare/pillow-simd
+.. _accimage: https://github.com/pytorch/accimage
+.. _libjpeg: http://ijg.org/
+.. _libjpeg-turbo: https://libjpeg-turbo.org/
+
+Video Backend
+=============
+Torchvision currently supports the following video backends:
+
+* `pyav`_ (default) - Pythonic binding for ffmpeg libraries.
+
+.. _pyav : https://github.com/PyAV-Org/PyAV
+
+* video_reader - This needs ffmpeg to be installed and torchvision to be built from source. There shouldn't be any conflicting version of ffmpeg installed. Currently, this is only supported on Linux.
+
+.. code:: bash
+
+ conda install -c conda-forge ffmpeg
+ python setup.py install
+
+
+Using the models on C++
+=======================
+TorchVision provides an example project for how to use the models on C++ using JIT Script.
+
+Installation From source:
+
+.. code:: bash
+
+ mkdir build
+ cd build
+ # Add -DWITH_CUDA=on support for the CUDA if needed
+ cmake ..
+ make
+ make install
+
+Once installed, the library can be accessed in cmake (after properly configuring ``CMAKE_PREFIX_PATH``) via the :code:`TorchVision::TorchVision` target:
+
+.. code:: rest
+
+ find_package(TorchVision REQUIRED)
+ target_link_libraries(my-target PUBLIC TorchVision::TorchVision)
+
+The ``TorchVision`` package will also automatically look for the ``Torch`` package and add it as a dependency to ``my-target``,
+so make sure that it is also available to cmake via the ``CMAKE_PREFIX_PATH``.
+
+For an example setup, take a look at ``examples/cpp/hello_world``.
+
+Python linking is disabled by default when compiling TorchVision with CMake, this allows you to run models without any Python
+dependency. In some special cases where TorchVision's operators are used from Python code, you may need to link to Python. This
+can be done by passing ``-DUSE_PYTHON=on`` to CMake.
+
+TorchVision Operators
+---------------------
+In order to get the torchvision operators registered with torch (eg. for the JIT), all you need to do is to ensure that you
+:code:`#include <torchvision/vision.h>` in your project.
+
+Documentation
+=============
+You can find the API documentation on the pytorch website: https://pytorch.org/vision/stable/index.html
+
+Contributing
+============
+
+See the `CONTRIBUTING <https://github.com/pytorch/vision/blob/main/CONTRIBUTING.md>`_ file for how to help out.
+
+Disclaimer on Datasets
+======================
+
+This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.
+
+If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!
+
+Pre-trained Model License
+=========================
+
+The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.
+
+More specifically, SWAG models are released under the CC-BY-NC 4.0 license. See `SWAG LICENSE <https://github.com/facebookresearch/SWAG/blob/main/LICENSE>`_ for additional details.
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..ba076adbb497d03131fecc8fecafda764642e3ee
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/RECORD
@@ -0,0 +1,310 @@
+torchvision-0.13.1+cu113.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+torchvision-0.13.1+cu113.dist-info/LICENSE,sha256=ZQL2doUc_iX4r3VTHfsyN1tzJbc8N-e0N0H6QiiT5x0,1517
+torchvision-0.13.1+cu113.dist-info/METADATA,sha256=NSKPULTdQrzzY9Ycg0TwXfwjMy-rbGESlz-5b-HdtQ0,10763
+torchvision-0.13.1+cu113.dist-info/RECORD,,
+torchvision-0.13.1+cu113.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+torchvision-0.13.1+cu113.dist-info/WHEEL,sha256=TpFVeXF_cAlV118WSIPWtjqW7nPvzoOw-49FmS3fDKQ,103
+torchvision-0.13.1+cu113.dist-info/top_level.txt,sha256=ucJZoaluBW9BGYT4TuCE6zoZY_JuSP30wbDh-IRpxUU,12
+torchvision.libs/libcudart.053364c0.so.11.0,sha256=p7IPID9fNYt-U289fWBaL60mY1dBoEAURNxp0YSJEms,619192
+torchvision.libs/libjpeg.ceea7512.so.62,sha256=Q0Nt1U7kvyOPOH37o9EyH96wBEFcgH1NNJDDaL1eXew,285328
+torchvision.libs/libnvjpeg.90286a3c.so.11,sha256=oqx0Er6bMI2DPQ3MTnx3ZX0EyjHNrsw3FT6FtsiWmy4,5161560
+torchvision.libs/libpng16.7f72a3c5.so.16,sha256=2hP_4hzlivGlgUDz5g260U3tX3Ov1puQMpY9gnztmyY,1079080
+torchvision.libs/libz.1328edc3.so.1,sha256=V0yKarDo8YF9L8Mpw1QgpgE1rTAVCm_wCONI4tjH2jA,90160
+torchvision/_C.so,sha256=vce0IZ58B_W_VIuwpx8syjqjeIjVSYZrRyuEwfrS2IA,49975840
+torchvision/__init__.py,sha256=be6Lv5UigixY25PobgTvrsasK2ha8bbC6WBG9l_Sw7M,3026
+torchvision/__pycache__/__init__.cpython-38.pyc,,
+torchvision/__pycache__/_internally_replaced_utils.cpython-38.pyc,,
+torchvision/__pycache__/_utils.cpython-38.pyc,,
+torchvision/__pycache__/extension.cpython-38.pyc,,
+torchvision/__pycache__/utils.cpython-38.pyc,,
+torchvision/__pycache__/version.cpython-38.pyc,,
+torchvision/_internally_replaced_utils.py,sha256=lQEoDguCbK1la1ne4ckKcOwVye16yWw9clW44LG_qmU,1743
+torchvision/_utils.py,sha256=3TiMgWBLFMKoTt9y1IHZSiW5lDaiTPJ8UWCBGfdEGic,934
+torchvision/datasets/__init__.py,sha256=fzU18pBr2nqVSbRw-Ay2liCBTLSJ6biAO09K6fpw728,2554
+torchvision/datasets/__pycache__/__init__.cpython-38.pyc,,
+torchvision/datasets/__pycache__/_optical_flow.cpython-38.pyc,,
+torchvision/datasets/__pycache__/caltech.cpython-38.pyc,,
+torchvision/datasets/__pycache__/celeba.cpython-38.pyc,,
+torchvision/datasets/__pycache__/cifar.cpython-38.pyc,,
+torchvision/datasets/__pycache__/cityscapes.cpython-38.pyc,,
+torchvision/datasets/__pycache__/clevr.cpython-38.pyc,,
+torchvision/datasets/__pycache__/coco.cpython-38.pyc,,
+torchvision/datasets/__pycache__/country211.cpython-38.pyc,,
+torchvision/datasets/__pycache__/dtd.cpython-38.pyc,,
+torchvision/datasets/__pycache__/eurosat.cpython-38.pyc,,
+torchvision/datasets/__pycache__/fakedata.cpython-38.pyc,,
+torchvision/datasets/__pycache__/fer2013.cpython-38.pyc,,
+torchvision/datasets/__pycache__/fgvc_aircraft.cpython-38.pyc,,
+torchvision/datasets/__pycache__/flickr.cpython-38.pyc,,
+torchvision/datasets/__pycache__/flowers102.cpython-38.pyc,,
+torchvision/datasets/__pycache__/folder.cpython-38.pyc,,
+torchvision/datasets/__pycache__/food101.cpython-38.pyc,,
+torchvision/datasets/__pycache__/gtsrb.cpython-38.pyc,,
+torchvision/datasets/__pycache__/hmdb51.cpython-38.pyc,,
+torchvision/datasets/__pycache__/imagenet.cpython-38.pyc,,
+torchvision/datasets/__pycache__/inaturalist.cpython-38.pyc,,
+torchvision/datasets/__pycache__/kinetics.cpython-38.pyc,,
+torchvision/datasets/__pycache__/kitti.cpython-38.pyc,,
+torchvision/datasets/__pycache__/lfw.cpython-38.pyc,,
+torchvision/datasets/__pycache__/lsun.cpython-38.pyc,,
+torchvision/datasets/__pycache__/mnist.cpython-38.pyc,,
+torchvision/datasets/__pycache__/omniglot.cpython-38.pyc,,
+torchvision/datasets/__pycache__/oxford_iiit_pet.cpython-38.pyc,,
+torchvision/datasets/__pycache__/pcam.cpython-38.pyc,,
+torchvision/datasets/__pycache__/phototour.cpython-38.pyc,,
+torchvision/datasets/__pycache__/places365.cpython-38.pyc,,
+torchvision/datasets/__pycache__/rendered_sst2.cpython-38.pyc,,
+torchvision/datasets/__pycache__/sbd.cpython-38.pyc,,
+torchvision/datasets/__pycache__/sbu.cpython-38.pyc,,
+torchvision/datasets/__pycache__/semeion.cpython-38.pyc,,
+torchvision/datasets/__pycache__/stanford_cars.cpython-38.pyc,,
+torchvision/datasets/__pycache__/stl10.cpython-38.pyc,,
+torchvision/datasets/__pycache__/sun397.cpython-38.pyc,,
+torchvision/datasets/__pycache__/svhn.cpython-38.pyc,,
+torchvision/datasets/__pycache__/ucf101.cpython-38.pyc,,
+torchvision/datasets/__pycache__/usps.cpython-38.pyc,,
+torchvision/datasets/__pycache__/utils.cpython-38.pyc,,
+torchvision/datasets/__pycache__/video_utils.cpython-38.pyc,,
+torchvision/datasets/__pycache__/vision.cpython-38.pyc,,
+torchvision/datasets/__pycache__/voc.cpython-38.pyc,,
+torchvision/datasets/__pycache__/widerface.cpython-38.pyc,,
+torchvision/datasets/_optical_flow.py,sha256=KmA06a_SJuKbqqzfsaqL7xyVH_woNiLadueDydbOpB8,19330
+torchvision/datasets/caltech.py,sha256=rEurZoltaGuG8tIOKLTk0WDx7atseyHbMwYTXdHS6HQ,8738
+torchvision/datasets/celeba.py,sha256=Oze5jdCwsXCv3XkNPwP8XdvWHlxNzq8_B0khcTCvk00,8297
+torchvision/datasets/cifar.py,sha256=dguXLlCIC0DEjOFn4H70uTg1--ysFYUUN3OOirc2LjE,5851
+torchvision/datasets/cityscapes.py,sha256=KGqmcm-VkdEzYwiVygdiu12nay0c-ZXUT9d2qkMQJuc,10237
+torchvision/datasets/clevr.py,sha256=jrOYayUE1zIyCv9_Ig7XKcNXxEVIa-Jiusm9kAKrghA,3416
+torchvision/datasets/coco.py,sha256=CCCyw8oKXhvxW85G5pd_RQ02mu8qNJF8XVZ5mixJ_7g,3972
+torchvision/datasets/country211.py,sha256=1k-fW1cixljoSCZKCtkNU0aOTtTvtO0-W4Tnyn-v07g,2408
+torchvision/datasets/dtd.py,sha256=WbH23rV45oWwc5JTom5KIQxNuIHc2YtrnBORjH1txzI,3939
+torchvision/datasets/eurosat.py,sha256=cf0RE6i5ngu9cuKFj14-okg7bmLt4VC-5v1fRVd0qtg,2053
+torchvision/datasets/fakedata.py,sha256=VIxYzcoNwtgTGBsR9d3dmpYJhJA63iTPvMROqHsXBmk,2481
+torchvision/datasets/fer2013.py,sha256=tsKrNB-HUz0pCFbpOThA71e1ffZeBrTPtCJY1UnhVlA,2762
+torchvision/datasets/fgvc_aircraft.py,sha256=9b-tHEsuxyChvVzeN7dv9j4QbdjJe6lkk3UwwbTDRP4,4561
+torchvision/datasets/flickr.py,sha256=7u7IKEc-KawRlKV2QKdrbG-XCETR7qwF5uTfp7vNOkQ,5339
+torchvision/datasets/flowers102.py,sha256=QI_lf4UFTKItExlmjvsvusCBLlPucd_73lsH_WG7aH8,4600
+torchvision/datasets/folder.py,sha256=nX8xT7ww9Ypqeo2oj6MTzThg5XpPMxkSqHL6aowfEUs,11928
+torchvision/datasets/food101.py,sha256=vOfMjpzgnawK8jghjKI8HBbJBhlb1lAC_23_ZMcu4Qk,3713
+torchvision/datasets/gtsrb.py,sha256=N17Rq6IQX4oPEAtg-VNqxv789IhSrFKGWmfWAuej1UQ,3742
+torchvision/datasets/hmdb51.py,sha256=aaZKevB7VBrrFjk1Y5PJZpmxNGM0vuE4xALwGW0v6Rg,5910
+torchvision/datasets/imagenet.py,sha256=R99zDi4sv_Pft4qTls8uaIj8KEwLFOgpkixq5pkGaWA,8130
+torchvision/datasets/inaturalist.py,sha256=BNuqP_P1A3L7zafjX-gH60Mp45Njh_MYcUGCiaxlONo,10107
+torchvision/datasets/kinetics.py,sha256=6jRd2uq8NgWlSLT37-H-lQj4oRWp7DMlZ1JaqASrVFQ,13462
+torchvision/datasets/kitti.py,sha256=pE3w4DIpCLT4OBS97jEQNpNJjWuSUaJoi9PX_pW4Fv0,5600
+torchvision/datasets/lfw.py,sha256=6HRNjLNA0qzAmTKVM36XTiZiwq_Ve0WrIwBX64DkYQk,10282
+torchvision/datasets/lsun.py,sha256=ft-KT-QqvduqzMe2u78R-3uGR-6-iYc_PAo-ConZUOQ,5675
+torchvision/datasets/mnist.py,sha256=64AsnaFthqUX9UAOGHhZzNnvoWZBA_Th9S5pH6Oc2RM,21268
+torchvision/datasets/omniglot.py,sha256=DdQ_euGj5a6rHgJgF8hQfll3aqWG77Cqx_QI-bZxq98,4091
+torchvision/datasets/oxford_iiit_pet.py,sha256=XoOlkr6xAkYiodsUcFLGDJOJjwt8f6_L-A9FtsbdW24,5071
+torchvision/datasets/pcam.py,sha256=5prDGj4hIs__vhaTXGInxFj9LQvBjyQM9Q3Yijlxr1Y,5115
+torchvision/datasets/phototour.py,sha256=0bLBhwI5lD5fmXyEtKFW0WTC3XQ2lRm433-8c0Y_ZUA,7924
+torchvision/datasets/places365.py,sha256=kKWXsluuxYHo4R0XCt3br6Pt_gs644u5vNXIcBVWnmc,7201
+torchvision/datasets/rendered_sst2.py,sha256=AlQM5wL74dfh6UG1CJ5Z24nE73Bt-s5cAFB2Kx1Kuc0,3557
+torchvision/datasets/samplers/__init__.py,sha256=xtVWypjklTTfD6ABqUPLBq8mM6TB4LPBhWub4Kt_A0E,161
+torchvision/datasets/samplers/__pycache__/__init__.cpython-38.pyc,,
+torchvision/datasets/samplers/__pycache__/clip_sampler.cpython-38.pyc,,
+torchvision/datasets/samplers/clip_sampler.py,sha256=a02AkBUURZdPgeShCA39iXrOYP9BbFd6P0F_au4IwgY,6244
+torchvision/datasets/sbd.py,sha256=lQ3bEfTmE0MZkaPkV3b99FwgGJzLuQb2q7s55e81F1I,5202
+torchvision/datasets/sbu.py,sha256=FkHyv0WG_wg6LZ1QmIBSGjSGhpnemg7DAnc1Wzo4WZ4,4203
+torchvision/datasets/semeion.py,sha256=25XKKkE1iXHP_tD-IE0kYoSxSE_VsvsfdHEaq1PDE4k,3088
+torchvision/datasets/stanford_cars.py,sha256=xd45oXEfIc8_pJgx38V8PM1GlJ6yOamAyCUNDgBcRNw,4843
+torchvision/datasets/stl10.py,sha256=FzelrpggFWNDy663yXib_5toRDVoEr-V4TK8Mj2BbdY,7294
+torchvision/datasets/sun397.py,sha256=bL_lTJoQYeS5WKFDl_Sc7VWfkZXhTOE9MS3kEJ2rAO4,2743
+torchvision/datasets/svhn.py,sha256=dqZpyoR-qhGnazmjeBup8IAvNCZQbtb7HOt2UGD2BAI,4766
+torchvision/datasets/ucf101.py,sha256=_gSWp-HFkhmy_3p2DmMRbMCWMLdQcvkIfdABt1b_sHo,5472
+torchvision/datasets/usps.py,sha256=slohXYwBzIQtBd3idomN6hSn8U1x3dlB6nmBo_Vw2Ys,3440
+torchvision/datasets/utils.py,sha256=ltwEPcyCysSivJlA3bknU2wQrN8KMGhFoCkw0ceOVEk,17320
+torchvision/datasets/video_utils.py,sha256=4W-YqLjW8Ju4di2F9AxpiSHq2-BHC04CxSXl8NKbAbc,16984
+torchvision/datasets/vision.py,sha256=Vmlmd01282y8qoqZnU2s6Wx1-EVCJUzvG5S7cRxyNh8,4170
+torchvision/datasets/voc.py,sha256=Ci46GRUEJt4PD7fyRVd1ClnKMVlySekFgXohxkJy6A0,9343
+torchvision/datasets/widerface.py,sha256=H4i43NgkJnEr7qCbbjoxcijvyxu6idYmBIpDCfIrj00,8082
+torchvision/extension.py,sha256=ZtX-U76XTXa-8G99Lqka7xrt7Y15q-PUOASwcWqbaxg,3097
+torchvision/image.so,sha256=XlUMgdkTK1dJioRvLnXMhkM53YU-WnpJLuMbB71s0zE,10134264
+torchvision/io/__init__.py,sha256=idLUnZjpKij_ECVxkDO1ABIVhpx2sZXOi2bL_-l9UKw,1494
+torchvision/io/__pycache__/__init__.cpython-38.pyc,,
+torchvision/io/__pycache__/_load_gpu_decoder.cpython-38.pyc,,
+torchvision/io/__pycache__/_video_opt.cpython-38.pyc,,
+torchvision/io/__pycache__/image.cpython-38.pyc,,
+torchvision/io/__pycache__/video.cpython-38.pyc,,
+torchvision/io/__pycache__/video_reader.cpython-38.pyc,,
+torchvision/io/_load_gpu_decoder.py,sha256=nvR0HG0B2-GEYpFiooPELIOgfL6X3gUetPgFGuH4nWs,174
+torchvision/io/_video_opt.py,sha256=Jv8295nfYaCXoi5qx2vQo86ntW61qQzhVpXIxQZN2d0,19729
+torchvision/io/image.py,sha256=L3-QGUVILqNozayGcUSOQKP8DrntkgXQHJqtQLvyiUU,9447
+torchvision/io/video.py,sha256=2VUeEsOfuM2Rj56q9EGw4_AwuLGg_HC0WenzatGldB4,15409
+torchvision/io/video_reader.py,sha256=B-lqxnYBpv02BtNWoCFYjvSD-fG2nVHR3Zkb4wgGq2U,6651
+torchvision/models/__init__.py,sha256=qoVVIvzJ0cuFQO7ypFykxLDyxgoEXeHVtpOcfEIPBmY,534
+torchvision/models/__pycache__/__init__.cpython-38.pyc,,
+torchvision/models/__pycache__/_api.cpython-38.pyc,,
+torchvision/models/__pycache__/_meta.cpython-38.pyc,,
+torchvision/models/__pycache__/_utils.cpython-38.pyc,,
+torchvision/models/__pycache__/alexnet.cpython-38.pyc,,
+torchvision/models/__pycache__/convnext.cpython-38.pyc,,
+torchvision/models/__pycache__/densenet.cpython-38.pyc,,
+torchvision/models/__pycache__/efficientnet.cpython-38.pyc,,
+torchvision/models/__pycache__/feature_extraction.cpython-38.pyc,,
+torchvision/models/__pycache__/googlenet.cpython-38.pyc,,
+torchvision/models/__pycache__/inception.cpython-38.pyc,,
+torchvision/models/__pycache__/mnasnet.cpython-38.pyc,,
+torchvision/models/__pycache__/mobilenet.cpython-38.pyc,,
+torchvision/models/__pycache__/mobilenetv2.cpython-38.pyc,,
+torchvision/models/__pycache__/mobilenetv3.cpython-38.pyc,,
+torchvision/models/__pycache__/regnet.cpython-38.pyc,,
+torchvision/models/__pycache__/resnet.cpython-38.pyc,,
+torchvision/models/__pycache__/shufflenetv2.cpython-38.pyc,,
+torchvision/models/__pycache__/squeezenet.cpython-38.pyc,,
+torchvision/models/__pycache__/swin_transformer.cpython-38.pyc,,
+torchvision/models/__pycache__/vgg.cpython-38.pyc,,
+torchvision/models/__pycache__/vision_transformer.cpython-38.pyc,,
+torchvision/models/_api.py,sha256=gpeLJOMYPrKQZsY71sLUhZNlyi32VvmXjYeUis5I4Ig,5231
+torchvision/models/_meta.py,sha256=fqpeQBsf9EEYbmApQ8Q0LKyM9_UFwjireII5mwDbwJY,28875
+torchvision/models/_utils.py,sha256=BBn3wgV5p9p50Ms6lKn7FFkgbR7-mMrRVaG1KBRodK4,10863
+torchvision/models/alexnet.py,sha256=35szl0asdF8U6SlJecHAZiUb2h_aVhf7OD2GHtGR4-A,4589
+torchvision/models/convnext.py,sha256=abC6b3kEAqz4n0KEJLlgKPg2siOjZY_84MpBpvGh77w,14971
+torchvision/models/densenet.py,sha256=XeSuy07bW_0Ufnop1VWXTWl55GJ8BZczx6dSAZ61c_c,16864
+torchvision/models/detection/__init__.py,sha256=JwYm_fTGO_FeRg4eTOQLwQPZ9lC9jheZ-QEoJgqKTjg,168
+torchvision/models/detection/__pycache__/__init__.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/_utils.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/anchor_utils.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/backbone_utils.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/faster_rcnn.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/fcos.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/generalized_rcnn.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/image_list.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/keypoint_rcnn.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/mask_rcnn.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/retinanet.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/roi_heads.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/rpn.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/ssd.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/ssdlite.cpython-38.pyc,,
+torchvision/models/detection/__pycache__/transform.cpython-38.pyc,,
+torchvision/models/detection/_utils.py,sha256=4FbLRtDiPdPKZCChfiS628aIdmCbt11UjMOYilAiatE,22061
+torchvision/models/detection/anchor_utils.py,sha256=m5auVUofHAd925orWFMaNzNU4JdJGzvymW5486I6_Pc,11822
+torchvision/models/detection/backbone_utils.py,sha256=njwNWlJ410UxGhYspc7ZkCy1_AGXDXge0S2bV0sgQh4,10464
+torchvision/models/detection/faster_rcnn.py,sha256=NRCrqGkvD9sMH2LP5EHDMTKz4lq56S6zpcFWnK8OeB4,36633
+torchvision/models/detection/fcos.py,sha256=-DFwCtHUJWtZRHmcWECxtPUTF-XrC9FFW9rafx3iLJQ,34341
+torchvision/models/detection/generalized_rcnn.py,sha256=vuzcv9NNC_2PGDhBW0EZizdv8ykbUNDr7u_ACwoWZWw,4743
+torchvision/models/detection/image_list.py,sha256=SUJ3xMn-1xc6ivYZUNIdWBh3RH9xD8EtCdpsXnPI_iM,783
+torchvision/models/detection/keypoint_rcnn.py,sha256=G8qJybrRYRFo8krCFznK6OdsG1CfCcTNMFMLqu2eoIo,22001
+torchvision/models/detection/mask_rcnn.py,sha256=BDW3_CKdj853ZAmMMcVeWQq7VjxMIwtOJs9NBXiKqT4,26337
+torchvision/models/detection/retinanet.py,sha256=kuQwZG77078w-y_MacwXgFxRW11YzqAZR63JXIO3W-s,36928
+torchvision/models/detection/roi_heads.py,sha256=FO42VPrz-5yWY4WhM8dNuN7f9hpwPBcY7SSn-koawyM,33822
+torchvision/models/detection/rpn.py,sha256=ENiwgvReI25I61nhW9pGXIU6EtMCUGtfq2ccAML-eZc,15818
+torchvision/models/detection/ssd.py,sha256=FU8gLERlYYxAs4ufvo8AZkusj6tqXhspDdts5cbF8fE,29565
+torchvision/models/detection/ssdlite.py,sha256=gWdafmr3UJ9fEVTyW_ZReCEzmGEaiIThATgP-F2iP8Y,13345
+torchvision/models/detection/transform.py,sha256=8pVzm5y1At7vXi-Z5FpofC1I2KJ1gHy6hoSdmwJgSZc,11839
+torchvision/models/efficientnet.py,sha256=pQuedOrBVe8S3-zJ7Qbv_aDsR21nCOUJVVAlfaJKxxQ,42915
+torchvision/models/feature_extraction.py,sha256=StwSs8aBWHGcFPno-2xIS9ruh6PiHWw9WCdDOGyZz5Y,25588
+torchvision/models/googlenet.py,sha256=A4874JOtPXgznkFusxua16X1s3XL68Y2pZPthUeAdQ0,12955
+torchvision/models/inception.py,sha256=qTrWIX62P2YHjgZgjkas7rFxiDhws2YKqYwYjp9vZ4U,19015
+torchvision/models/mnasnet.py,sha256=VJfTQLCn00fH3e__5kpoMM2QBik_9_mt3QblWS8RlTM,17245
+torchvision/models/mobilenet.py,sha256=lSRVxw2TL3LFBwCadvyvH6n3GzqUTnK2-rhX3MOgSrs,211
+torchvision/models/mobilenetv2.py,sha256=r5d7vO0fU_Nn70JkUdDZQ6htaZfbfPEAXxDDuAGzzlo,10452
+torchvision/models/mobilenetv3.py,sha256=jBtE_zGxFhlB8Z6SOV1JoQzh9CqAI_hmc3NGOVuSaC4,16942
+torchvision/models/optical_flow/__init__.py,sha256=0zRlMWQJCjFqoUafUXVgO89-z7em7tACo9E8hHSq9RQ,20
+torchvision/models/optical_flow/__pycache__/__init__.cpython-38.pyc,,
+torchvision/models/optical_flow/__pycache__/_utils.cpython-38.pyc,,
+torchvision/models/optical_flow/__pycache__/raft.cpython-38.pyc,,
+torchvision/models/optical_flow/_utils.py,sha256=UGlY_R8Fnljz4fFgizNWCy4sGAnarhCjuTFULXawqAY,1798
+torchvision/models/optical_flow/raft.py,sha256=nVK37JkIo1WOj3GnnaAw-ZJKHlJadeljmR5kselzrrU,38251
+torchvision/models/quantization/__init__.py,sha256=gqFM7zI4UUHKKBDJAumozOn7xPL0JtvyNS8Ejz6QXp0,125
+torchvision/models/quantization/__pycache__/__init__.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/googlenet.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/inception.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/mobilenet.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/mobilenetv2.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/mobilenetv3.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/resnet.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/shufflenetv2.cpython-38.pyc,,
+torchvision/models/quantization/__pycache__/utils.cpython-38.pyc,,
+torchvision/models/quantization/googlenet.py,sha256=m8oMh3l71a06m79U4kR8koTzng3Cj2ibDF6wZ1cd8Ao,8322
+torchvision/models/quantization/inception.py,sha256=f6lOZn_16PArHCoU_6Teqr6qL5xKUvPeC4fgjefy5HQ,11199
+torchvision/models/quantization/mobilenet.py,sha256=lSRVxw2TL3LFBwCadvyvH6n3GzqUTnK2-rhX3MOgSrs,211
+torchvision/models/quantization/mobilenetv2.py,sha256=jAgoCJt-lbBUtsbwad0xPhctBfVOXcWZr9d0fxRH304,6067
+torchvision/models/quantization/mobilenetv3.py,sha256=ZXkQOuSm0w5vRlCvHrh0YxKJmpdtvolFQD7YihPtgRE,9402
+torchvision/models/quantization/resnet.py,sha256=WB3re466O1tRQbtegUnlA2sCkKhOiQQp9W_sOcbF4u0,17574
+torchvision/models/quantization/shufflenetv2.py,sha256=tse0ukE4zZjM8OqPV5rm7TwCdy99FSqjgYCto9LHVMQ,16323
+torchvision/models/quantization/utils.py,sha256=n8mWsK9_Ek_M2AqGKPLoLlcKaYGH2PrF2l5_W84oBMk,2058
+torchvision/models/regnet.py,sha256=kzWMoei5HVqEajE1z-IF8Ku1iyX64O_zHwDLybOJWSM,63163
+torchvision/models/resnet.py,sha256=JBM2yaDWuimAIiFD0R_rbOdV45R7g191LKZn26zXfog,38326
+torchvision/models/segmentation/__init__.py,sha256=TGk6UdVXAMtwBpYalrvdXZnmSwqzTDOT1lgKrfzhHrQ,66
+torchvision/models/segmentation/__pycache__/__init__.cpython-38.pyc,,
+torchvision/models/segmentation/__pycache__/_utils.cpython-38.pyc,,
+torchvision/models/segmentation/__pycache__/deeplabv3.cpython-38.pyc,,
+torchvision/models/segmentation/__pycache__/fcn.cpython-38.pyc,,
+torchvision/models/segmentation/__pycache__/lraspp.cpython-38.pyc,,
+torchvision/models/segmentation/__pycache__/segmentation.cpython-38.pyc,,
+torchvision/models/segmentation/_utils.py,sha256=IdHF0ORnHii4YPQ_SAhe2RAEmNwLL-Wwk6BU86XXou8,1197
+torchvision/models/segmentation/deeplabv3.py,sha256=Nfv41ir60NB8seJR-MFQCgDEaf_DkqtnVCGJk9xK51g,15027
+torchvision/models/segmentation/fcn.py,sha256=TD9-GbBSNug2DmXZJA1LF4-cfKjA_JZbhk1I3P64owE,9026
+torchvision/models/segmentation/lraspp.py,sha256=LhBbtjmWSZWtTBiXLRngoXTYLJhFLaYSuamBg5wD0qM,7783
+torchvision/models/segmentation/segmentation.py,sha256=0A10_W_T-gaYvXrkzTsfJh6qahAOd6LmWs4OUa-iFdo,301
+torchvision/models/shufflenetv2.py,sha256=axn0tpbq4dQbLHrnfbdKhVeTyayeiZdPCS9OJBQEO2U,15468
+torchvision/models/squeezenet.py,sha256=Yu9ZKL8eyssD7TqtyfYMljbjGt4aXnKMdVVAdQWBSJ4,8866
+torchvision/models/swin_transformer.py,sha256=s_3pXq2umsHo_loQGI9X9KplarvmfkZUDa64PVRkQ38,23290
+torchvision/models/vgg.py,sha256=XfKkXJLnb4tyht0lBJoCBSKZMfDtzI5h-KLmEgJ9In0,19072
+torchvision/models/video/__init__.py,sha256=rNdb4S0hSIwQP1zvbNUtIwzXicqEd7szmv_nX8N4cD0,22
+torchvision/models/video/__pycache__/__init__.cpython-38.pyc,,
+torchvision/models/video/__pycache__/resnet.cpython-38.pyc,,
+torchvision/models/video/resnet.py,sha256=oNUpOi4gRNnWJbhr9DI31KTXk4js4MzXrgBOgTR7gZA,16377
+torchvision/models/vision_transformer.py,sha256=VS-IwjxMsh1Q7zEYedPqFaeHrBIb2Z_Chv1FQSpW_Xs,31638
+torchvision/ops/__init__.py,sha256=agxr7Eo3iuWatDQ0IlrG-nTj1CD0O9KOiGu6ZY-x8Qo,1928
+torchvision/ops/__pycache__/__init__.cpython-38.pyc,,
+torchvision/ops/__pycache__/_box_convert.cpython-38.pyc,,
+torchvision/ops/__pycache__/_register_onnx_ops.cpython-38.pyc,,
+torchvision/ops/__pycache__/_utils.cpython-38.pyc,,
+torchvision/ops/__pycache__/boxes.cpython-38.pyc,,
+torchvision/ops/__pycache__/ciou_loss.cpython-38.pyc,,
+torchvision/ops/__pycache__/deform_conv.cpython-38.pyc,,
+torchvision/ops/__pycache__/diou_loss.cpython-38.pyc,,
+torchvision/ops/__pycache__/drop_block.cpython-38.pyc,,
+torchvision/ops/__pycache__/feature_pyramid_network.cpython-38.pyc,,
+torchvision/ops/__pycache__/focal_loss.cpython-38.pyc,,
+torchvision/ops/__pycache__/giou_loss.cpython-38.pyc,,
+torchvision/ops/__pycache__/misc.cpython-38.pyc,,
+torchvision/ops/__pycache__/poolers.cpython-38.pyc,,
+torchvision/ops/__pycache__/ps_roi_align.cpython-38.pyc,,
+torchvision/ops/__pycache__/ps_roi_pool.cpython-38.pyc,,
+torchvision/ops/__pycache__/roi_align.cpython-38.pyc,,
+torchvision/ops/__pycache__/roi_pool.cpython-38.pyc,,
+torchvision/ops/__pycache__/stochastic_depth.cpython-38.pyc,,
+torchvision/ops/_box_convert.py,sha256=ivW3XgJYRAcvb0JBTheYfjJNqqz9wKWJFfPpUZEexik,2408
+torchvision/ops/_register_onnx_ops.py,sha256=R4DP_S3ZUbCVBZnCiRJvzgHsfnWsQPr6LS6mBH4xcLw,2939
+torchvision/ops/_utils.py,sha256=pVHPpsmx6XcfGjUVk-XAEnd8QJBkrw_cT6fO_IwICE4,3630
+torchvision/ops/boxes.py,sha256=mZTce4nI0uZV2AQrIsOUTc9-gD_K0jhWykVr032Q9jY,15497
+torchvision/ops/ciou_loss.py,sha256=ZGaWBxfFgfmrkTS_A3Kxvqn0SfEU1DCu_VTSUXyPwrA,2497
+torchvision/ops/deform_conv.py,sha256=xij8ZLCM_FgZ05iOXLbE3hsHk5G5BQ9iJCCefsTefzo,6990
+torchvision/ops/diou_loss.py,sha256=tRTIJm0Goq65Et8-YkaRON7Lbu5BwDAuOBbPqmyJXz4,3101
+torchvision/ops/drop_block.py,sha256=aeZBgCqMrfptvWgxPsqxhw1PpVnmz9BmCpZdWn4A8N0,5859
+torchvision/ops/feature_pyramid_network.py,sha256=no_A6QiOOOsl84Kn1XGwAdepjcFyNKgb2Hus9-UqrH0,8569
+torchvision/ops/focal_loss.py,sha256=4Xn3o_4AjEA7QtwYwun92KEZHSN7VSHZEMHnCDklUt8,2000
+torchvision/ops/giou_loss.py,sha256=tBxaJUy-Mo7gfAB6Y9HfI3T5b2OTONwLpBDdFtLsNOo,2436
+torchvision/ops/misc.py,sha256=DW_jp1r60s-BKUhz5KroAYmeLMhFSTxha3tAp3eIHAY,12663
+torchvision/ops/poolers.py,sha256=rHGZVv6N-9B57wiOcFNq_crubpLYUJIHhNZMNmD8jwI,12765
+torchvision/ops/ps_roi_align.py,sha256=HURlBD1Q5-jfqEyrgTZieAlbWXiYbsGVfrk9IsUcSq8,3594
+torchvision/ops/ps_roi_pool.py,sha256=-NeV75c71P7QQqv7eVkClje8Kmj8CCW6QxViekCHzy8,2839
+torchvision/ops/roi_align.py,sha256=HI2NTpGgNTZU7LhgDZPgPPhsEJs3QJ9YNF69yGjfIGg,4127
+torchvision/ops/roi_pool.py,sha256=YSJA6sJnAp3W0rPKHaozzvyQJZxScyWRFEXcJtDRr8M,2891
+torchvision/ops/stochastic_depth.py,sha256=ISZ9noJyZLxpTG-wa2VmPs66qjhVsP7ZxWHvumWSP3U,2236
+torchvision/transforms/__init__.py,sha256=EMft42B1JAiU11J1rxIN4Znis6EJPbp-bsGjAzH-24M,53
+torchvision/transforms/__pycache__/__init__.cpython-38.pyc,,
+torchvision/transforms/__pycache__/_functional_video.cpython-38.pyc,,
+torchvision/transforms/__pycache__/_pil_constants.cpython-38.pyc,,
+torchvision/transforms/__pycache__/_presets.cpython-38.pyc,,
+torchvision/transforms/__pycache__/_transforms_video.cpython-38.pyc,,
+torchvision/transforms/__pycache__/autoaugment.cpython-38.pyc,,
+torchvision/transforms/__pycache__/functional.cpython-38.pyc,,
+torchvision/transforms/__pycache__/functional_pil.cpython-38.pyc,,
+torchvision/transforms/__pycache__/functional_tensor.cpython-38.pyc,,
+torchvision/transforms/__pycache__/transforms.cpython-38.pyc,,
+torchvision/transforms/_functional_video.py,sha256=mOc6p5bGXxrPsOGTuFOgnIoXaRwRp2g3IkcM_g3zlok,3851
+torchvision/transforms/_pil_constants.py,sha256=lGzZCeZJMf33iayGmX2YDLrB6AAGVn-sks_FbOklNLM,818
+torchvision/transforms/_presets.py,sha256=LtEXmc7DC1dkPHu-XpdzjDrcVYTJgvxR5T4q8eRV_io,8013
+torchvision/transforms/_transforms_video.py,sha256=OS-fwVoOH3yUKmvIfQamHrt4UNm6b0CJEmqZ-t45OlA,4956
+torchvision/transforms/autoaugment.py,sha256=Y-4y08nfgpr38JQ-fOWStzXIlm7t-yxS1-YRP98vdTs,28243
+torchvision/transforms/functional.py,sha256=rCVBd4Bc6tAVPrlp5TzxDXFYGeXUi6izEDgdCJ7P3UY,63349
+torchvision/transforms/functional_pil.py,sha256=2HZptBTvuuVkpLcdFQ5bMv7FXtSeU3lWYf0IR-huQ-A,13279
+torchvision/transforms/functional_tensor.py,sha256=OvcK3ex7rgHYbb44VdbSlM7Mb2g4kyT7KhmV2BzeF-8,34350
+torchvision/transforms/transforms.py,sha256=11pXUuGoUBD8ILERUKjbJ7z28hx3wC3J5XLQFr6KI0g,81589
+torchvision/utils.py,sha256=Kpo4I5o5AN_u3QzbwS0_mBB9kxQ3IQ_BjMlcPNMsxds,22177
+torchvision/version.py,sha256=d6pUziuBw5LbMV3bZXvUge5qh890Kz7f-sCDAg46pc8,203
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..d193dea988d97c2f7f7bf3c4fc196496d361cd4d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: false
+Tag: cp38-cp38-linux_x86_64
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e35531e566f2a925d851b9d3b8fa99645838e6e0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchvision-0.13.1+cu113.dist-info/top_level.txt
@@ -0,0 +1 @@
+torchvision
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/PKG-INFO b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..86bfb06ea5ad955be188c068759d63f7d51da110
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/PKG-INFO
@@ -0,0 +1,64 @@
+Metadata-Version: 2.1
+Name: wheel
+Version: 0.36.2
+Summary: A built-package format for Python
+Home-page: https://github.com/pypa/wheel
+Author: Daniel Holth
+Author-email: dholth@fastmail.fm
+Maintainer: Alex Grönholm
+Maintainer-email: alex.gronholm@nextday.fi
+License: MIT
+Project-URL: Documentation, https://wheel.readthedocs.io/
+Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html
+Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues
+Description: wheel
+ =====
+
+ This library is the reference implementation of the Python wheel packaging
+ standard, as defined in `PEP 427`_.
+
+ It has two different roles:
+
+ #. A setuptools_ extension for building wheels that provides the
+ ``bdist_wheel`` setuptools command
+ #. A command line tool for working with wheel files
+
+ It should be noted that wheel is **not** intended to be used as a library, and
+ as such there is no stable, public API.
+
+ .. _PEP 427: https://www.python.org/dev/peps/pep-0427/
+ .. _setuptools: https://pypi.org/project/setuptools/
+
+ Documentation
+ -------------
+
+ The documentation_ can be found on Read The Docs.
+
+ .. _documentation: https://wheel.readthedocs.io/
+
+ Code of Conduct
+ ---------------
+
+ Everyone interacting in the wheel project's codebases, issue trackers, chat
+ rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+ .. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+
+Keywords: wheel,packaging
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
+Provides-Extra: test
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/SOURCES.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..944df520d5584bf7e0b0dffb07d9ed177e24a459
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/SOURCES.txt
@@ -0,0 +1,71 @@
+LICENSE.txt
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+tox.ini
+docs/Makefile
+docs/conf.py
+docs/development.rst
+docs/index.rst
+docs/installing.rst
+docs/make.bat
+docs/news.rst
+docs/quickstart.rst
+docs/story.rst
+docs/user_guide.rst
+docs/reference/index.rst
+docs/reference/wheel_convert.rst
+docs/reference/wheel_pack.rst
+docs/reference/wheel_unpack.rst
+manpages/wheel.rst
+src/wheel/__init__.py
+src/wheel/__main__.py
+src/wheel/bdist_wheel.py
+src/wheel/macosx_libfile.py
+src/wheel/metadata.py
+src/wheel/pkginfo.py
+src/wheel/util.py
+src/wheel/wheelfile.py
+src/wheel.egg-info/PKG-INFO
+src/wheel.egg-info/SOURCES.txt
+src/wheel.egg-info/dependency_links.txt
+src/wheel.egg-info/entry_points.txt
+src/wheel.egg-info/not-zip-safe
+src/wheel.egg-info/requires.txt
+src/wheel.egg-info/top_level.txt
+src/wheel/cli/__init__.py
+src/wheel/cli/convert.py
+src/wheel/cli/pack.py
+src/wheel/cli/unpack.py
+src/wheel/vendored/__init__.py
+src/wheel/vendored/packaging/__init__.py
+src/wheel/vendored/packaging/_typing.py
+src/wheel/vendored/packaging/tags.py
+tests/conftest.py
+tests/test_bdist_wheel.py
+tests/test_macosx_libfile.py
+tests/test_metadata.py
+tests/test_pkginfo.py
+tests/test_tagopt.py
+tests/test_wheelfile.py
+tests/cli/eggnames.txt
+tests/cli/test_convert.py
+tests/cli/test_pack.py
+tests/cli/test_unpack.py
+tests/testdata/test-1.0-py2.py3-none-any.whl
+tests/testdata/abi3extension.dist/extension.c
+tests/testdata/abi3extension.dist/setup.py
+tests/testdata/complex-dist/setup.py
+tests/testdata/complex-dist/complexdist/__init__.py
+tests/testdata/extension.dist/extension.c
+tests/testdata/extension.dist/setup.py
+tests/testdata/headers.dist/header.h
+tests/testdata/headers.dist/headersdist.py
+tests/testdata/headers.dist/setup.py
+tests/testdata/macosx_minimal_system_version/test_lib.c
+tests/testdata/simple.dist/setup.py
+tests/testdata/simple.dist/simpledist/__init__.py
+tests/testdata/unicode.dist/setup.py
+tests/testdata/unicode.dist/unicodedist/__init__.py
+tests/testdata/unicode.dist/unicodedist/åäö_日本語.py
\ No newline at end of file
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/entry_points.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b27acaddfde8258ee5b9eb70963ff108041bd998
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/entry_points.txt
@@ -0,0 +1,6 @@
+[console_scripts]
+wheel = wheel.cli:main
+
+[distutils.commands]
+bdist_wheel = wheel.bdist_wheel:bdist_wheel
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/not-zip-safe b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/not-zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/requires.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/requires.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1173c70c55eafef2e3af3332578aeb10cd8d11cf
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/requires.txt
@@ -0,0 +1,4 @@
+
+[test]
+pytest>=3.0.0
+pytest-cov
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2309722a93d261ba6840e2a1d18a96ac5b71f7b8
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel-0.36.2-py3.6.egg-info/top_level.txt
@@ -0,0 +1 @@
+wheel
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..349a8f7bea8082538efebe3093bcd58ea7c730a9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__init__.py
@@ -0,0 +1 @@
+__version__ = '0.36.2'
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__main__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3773a20e08b985d4160b624fbfe1742f35472cd
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__main__.py
@@ -0,0 +1,19 @@
+"""
+Wheel command line tool (enable python -m wheel syntax)
+"""
+
+import sys
+
+
+def main(): # needed for console script
+ if __package__ == '':
+ # To be able to run 'python wheel-0.9.whl/wheel':
+ import os.path
+ path = os.path.dirname(os.path.dirname(__file__))
+ sys.path[0:0] = [path]
+ import wheel.cli
+ sys.exit(wheel.cli.main())
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/bdist_wheel.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/bdist_wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..80e43d0a5f557e877bb564cd117e32eb83f6a995
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/bdist_wheel.py
@@ -0,0 +1,492 @@
+"""
+Create a wheel (.whl) distribution.
+
+A wheel is a built archive format.
+"""
+
+import distutils
+import os
+import shutil
+import stat
+import sys
+import re
+import warnings
+from collections import OrderedDict
+from distutils.core import Command
+from distutils import log as logger
+from io import BytesIO
+from glob import iglob
+from shutil import rmtree
+from sysconfig import get_config_var
+from zipfile import ZIP_DEFLATED, ZIP_STORED
+
+import pkg_resources
+
+from .pkginfo import write_pkg_info
+from .macosx_libfile import calculate_macosx_platform_tag
+from .metadata import pkginfo_to_metadata
+from .vendored.packaging import tags
+from .wheelfile import WheelFile
+from . import __version__ as wheel_version
+
+if sys.version_info < (3,):
+ from email.generator import Generator as BytesGenerator
+else:
+ from email.generator import BytesGenerator
+
+safe_name = pkg_resources.safe_name
+safe_version = pkg_resources.safe_version
+
+PY_LIMITED_API_PATTERN = r'cp3\d'
+
+
+def python_tag():
+ return 'py{}'.format(sys.version_info[0])
+
+
+def get_platform(archive_root):
+ """Return our platform name 'win32', 'linux_x86_64'"""
+ # XXX remove distutils dependency
+ result = distutils.util.get_platform()
+ if result.startswith("macosx") and archive_root is not None:
+ result = calculate_macosx_platform_tag(archive_root, result)
+ if result == "linux_x86_64" and sys.maxsize == 2147483647:
+ # pip pull request #3497
+ result = "linux_i686"
+ return result
+
+
+def get_flag(var, fallback, expected=True, warn=True):
+ """Use a fallback value for determining SOABI flags if the needed config
+ var is unset or unavailable."""
+ val = get_config_var(var)
+ if val is None:
+ if warn:
+ warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
+ "be incorrect".format(var), RuntimeWarning, 2)
+ return fallback
+ return val == expected
+
+
+def get_abi_tag():
+ """Return the ABI tag based on SOABI (if available) or emulate SOABI
+ (CPython 2, PyPy)."""
+ soabi = get_config_var('SOABI')
+ impl = tags.interpreter_name()
+ if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
+ d = ''
+ m = ''
+ u = ''
+ if get_flag('Py_DEBUG',
+ hasattr(sys, 'gettotalrefcount'),
+ warn=(impl == 'cp')):
+ d = 'd'
+ if get_flag('WITH_PYMALLOC',
+ impl == 'cp',
+ warn=(impl == 'cp' and
+ sys.version_info < (3, 8))) \
+ and sys.version_info < (3, 8):
+ m = 'm'
+ if get_flag('Py_UNICODE_SIZE',
+ sys.maxunicode == 0x10ffff,
+ expected=4,
+ warn=(impl == 'cp' and
+ sys.version_info < (3, 3))) \
+ and sys.version_info < (3, 3):
+ u = 'u'
+ abi = '%s%s%s%s%s' % (impl, tags.interpreter_version(), d, m, u)
+ elif soabi and soabi.startswith('cpython-'):
+ abi = 'cp' + soabi.split('-')[1]
+ elif soabi and soabi.startswith('pypy-'):
+ # we want something like pypy36-pp73
+ abi = '-'.join(soabi.split('-')[:2])
+ abi = abi.replace('.', '_').replace('-', '_')
+ elif soabi:
+ abi = soabi.replace('.', '_').replace('-', '_')
+ else:
+ abi = None
+ return abi
+
+
+def safer_name(name):
+ return safe_name(name).replace('-', '_')
+
+
+def safer_version(version):
+ return safe_version(version).replace('-', '_')
+
+
+def remove_readonly(func, path, excinfo):
+ print(str(excinfo[1]))
+ os.chmod(path, stat.S_IWRITE)
+ func(path)
+
+
+class bdist_wheel(Command):
+
+ description = 'create a wheel distribution'
+
+ supported_compressions = OrderedDict([
+ ('stored', ZIP_STORED),
+ ('deflated', ZIP_DEFLATED)
+ ])
+
+ user_options = [('bdist-dir=', 'b',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform(None)),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('relative', None,
+ "build the archive using relative paths "
+ "(default: false)"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file"
+ " [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file"
+ " [default: current group]"),
+ ('universal', None,
+ "make a universal wheel"
+ " (default: false)"),
+ ('compression=', None,
+ "zipfile compression (one of: {})"
+ " (default: 'deflated')"
+ .format(', '.join(supported_compressions))),
+ ('python-tag=', None,
+ "Python implementation compatibility tag"
+ " (default: '%s')" % (python_tag())),
+ ('build-number=', None,
+ "Build number for this particular version. "
+ "As specified in PEP-0427, this must start with a digit. "
+ "[default: None]"),
+ ('py-limited-api=', None,
+ "Python tag (cp32|cp33|cpNN) for abi3 wheel tag"
+ " (default: false)"),
+ ]
+
+ boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.data_dir = None
+ self.plat_name = None
+ self.plat_tag = None
+ self.format = 'zip'
+ self.keep_temp = False
+ self.dist_dir = None
+ self.egginfo_dir = None
+ self.root_is_pure = None
+ self.skip_build = None
+ self.relative = False
+ self.owner = None
+ self.group = None
+ self.universal = False
+ self.compression = 'deflated'
+ self.python_tag = python_tag()
+ self.build_number = None
+ self.py_limited_api = False
+ self.plat_name_supplied = False
+
+ def finalize_options(self):
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'wheel')
+
+ self.data_dir = self.wheel_dist_name + '.data'
+ self.plat_name_supplied = self.plat_name is not None
+
+ try:
+ self.compression = self.supported_compressions[self.compression]
+ except KeyError:
+ raise ValueError('Unsupported compression: {}'.format(self.compression))
+
+ need_options = ('dist_dir', 'plat_name', 'skip_build')
+
+ self.set_undefined_options('bdist',
+ *zip(need_options, need_options))
+
+ self.root_is_pure = not (self.distribution.has_ext_modules()
+ or self.distribution.has_c_libraries())
+
+ if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
+ raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
+
+ # Support legacy [wheel] section for setting universal
+ wheel = self.distribution.get_option_dict('wheel')
+ if 'universal' in wheel:
+ # please don't define this in your global configs
+ logger.warn('The [wheel] section is deprecated. Use [bdist_wheel] instead.')
+ val = wheel['universal'][1].strip()
+ if val.lower() in ('1', 'true', 'yes'):
+ self.universal = True
+
+ if self.build_number is not None and not self.build_number[:1].isdigit():
+ raise ValueError("Build tag (build-number) must start with a digit.")
+
+ @property
+ def wheel_dist_name(self):
+ """Return distribution full name with - replaced with _"""
+ components = (safer_name(self.distribution.get_name()),
+ safer_version(self.distribution.get_version()))
+ if self.build_number:
+ components += (self.build_number,)
+ return '-'.join(components)
+
+ def get_tag(self):
+ # bdist sets self.plat_name if unset, we should only use it for purepy
+ # wheels if the user supplied it.
+ if self.plat_name_supplied:
+ plat_name = self.plat_name
+ elif self.root_is_pure:
+ plat_name = 'any'
+ else:
+ # macosx contains system version in platform name so need special handle
+ if self.plat_name and not self.plat_name.startswith("macosx"):
+ plat_name = self.plat_name
+ else:
+ # on macosx always limit the platform name to comply with any
+ # c-extension modules in bdist_dir, since the user can specify
+ # a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake
+
+ # on other platforms, and on macosx if there are no c-extension
+ # modules, use the default platform name.
+ plat_name = get_platform(self.bdist_dir)
+
+ if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
+ plat_name = 'linux_i686'
+
+ plat_name = plat_name.lower().replace('-', '_').replace('.', '_')
+
+ if self.root_is_pure:
+ if self.universal:
+ impl = 'py2.py3'
+ else:
+ impl = self.python_tag
+ tag = (impl, 'none', plat_name)
+ else:
+ impl_name = tags.interpreter_name()
+ impl_ver = tags.interpreter_version()
+ impl = impl_name + impl_ver
+ # We don't work on CPython 3.1, 3.0.
+ if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
+ impl = self.py_limited_api
+ abi_tag = 'abi3'
+ else:
+ abi_tag = str(get_abi_tag()).lower()
+ tag = (impl, abi_tag, plat_name)
+ # issue gh-374: allow overriding plat_name
+ supported_tags = [(t.interpreter, t.abi, plat_name)
+ for t in tags.sys_tags()]
+ assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
+ return tag
+
+ def run(self):
+ build_scripts = self.reinitialize_command('build_scripts')
+ build_scripts.executable = 'python'
+ build_scripts.force = True
+
+ build_ext = self.reinitialize_command('build_ext')
+ build_ext.inplace = False
+
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install',
+ reinit_subcommands=True)
+ install.root = self.bdist_dir
+ install.compile = False
+ install.skip_build = self.skip_build
+ install.warn_dir = False
+
+ # A wheel without setuptools scripts is more cross-platform.
+ # Use the (undocumented) `no_ep` option to setuptools'
+ # install_scripts command to avoid creating entry point scripts.
+ install_scripts = self.reinitialize_command('install_scripts')
+ install_scripts.no_ep = True
+
+ # Use a custom scheme for the archive, because we have to decide
+ # at installation time which scheme to use.
+ for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
+ setattr(install,
+ 'install_' + key,
+ os.path.join(self.data_dir, key))
+
+ basedir_observed = ''
+
+ if os.name == 'nt':
+ # win32 barfs if any of these are ''; could be '.'?
+ # (distutils.command.install:change_roots bug)
+ basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
+ self.install_libbase = self.install_lib = basedir_observed
+
+ setattr(install,
+ 'install_purelib' if self.root_is_pure else 'install_platlib',
+ basedir_observed)
+
+ logger.info("installing to %s", self.bdist_dir)
+
+ self.run_command('install')
+
+ impl_tag, abi_tag, plat_tag = self.get_tag()
+ archive_basename = "{}-{}-{}-{}".format(self.wheel_dist_name, impl_tag, abi_tag, plat_tag)
+ if not self.relative:
+ archive_root = self.bdist_dir
+ else:
+ archive_root = os.path.join(
+ self.bdist_dir,
+ self._ensure_relative(install.install_base))
+
+ self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir'))
+ distinfo_dirname = '{}-{}.dist-info'.format(
+ safer_name(self.distribution.get_name()),
+ safer_version(self.distribution.get_version()))
+ distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
+ self.egg2dist(self.egginfo_dir, distinfo_dir)
+
+ self.write_wheelfile(distinfo_dir)
+
+ # Make the archive
+ if not os.path.exists(self.dist_dir):
+ os.makedirs(self.dist_dir)
+
+ wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl')
+ with WheelFile(wheel_path, 'w', self.compression) as wf:
+ wf.write_files(archive_root)
+
+ # Add to 'Distribution.dist_files' so that the "upload" command works
+ getattr(self.distribution, 'dist_files', []).append(
+ ('bdist_wheel',
+ '{}.{}'.format(*sys.version_info[:2]), # like 3.7
+ wheel_path))
+
+ if not self.keep_temp:
+ logger.info('removing %s', self.bdist_dir)
+ if not self.dry_run:
+ rmtree(self.bdist_dir, onerror=remove_readonly)
+
+ def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
+ from email.message import Message
+
+ # Workaround for Python 2.7 for when "generator" is unicode
+ if sys.version_info < (3,) and not isinstance(generator, str):
+ generator = generator.encode('utf-8')
+
+ msg = Message()
+ msg['Wheel-Version'] = '1.0' # of the spec
+ msg['Generator'] = generator
+ msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
+ if self.build_number is not None:
+ msg['Build'] = self.build_number
+
+ # Doesn't work for bdist_wininst
+ impl_tag, abi_tag, plat_tag = self.get_tag()
+ for impl in impl_tag.split('.'):
+ for abi in abi_tag.split('.'):
+ for plat in plat_tag.split('.'):
+ msg['Tag'] = '-'.join((impl, abi, plat))
+
+ wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
+ logger.info('creating %s', wheelfile_path)
+ buffer = BytesIO()
+ BytesGenerator(buffer, maxheaderlen=0).flatten(msg)
+ with open(wheelfile_path, 'wb') as f:
+ f.write(buffer.getvalue().replace(b'\r\n', b'\r'))
+
+ def _ensure_relative(self, path):
+ # copied from dir_util, deleted
+ drive, path = os.path.splitdrive(path)
+ if path[0:1] == os.sep:
+ path = drive + path[1:]
+ return path
+
+ @property
+ def license_paths(self):
+ metadata = self.distribution.get_option_dict('metadata')
+ files = set()
+ patterns = sorted({
+ option for option in metadata.get('license_files', ('', ''))[1].split()
+ })
+
+ if 'license_file' in metadata:
+ warnings.warn('The "license_file" option is deprecated. Use '
+ '"license_files" instead.', DeprecationWarning)
+ files.add(metadata['license_file'][1])
+
+ if 'license_file' not in metadata and 'license_files' not in metadata:
+ patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
+
+ for pattern in patterns:
+ for path in iglob(pattern):
+ if path.endswith('~'):
+ logger.debug('ignoring license file "%s" as it looks like a backup', path)
+ continue
+
+ if path not in files and os.path.isfile(path):
+ logger.info('adding license file "%s" (matched pattern "%s")', path, pattern)
+ files.add(path)
+
+ return files
+
+ def egg2dist(self, egginfo_path, distinfo_path):
+ """Convert an .egg-info directory into a .dist-info directory"""
+ def adios(p):
+ """Appropriately delete directory, file or link."""
+ if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
+ shutil.rmtree(p)
+ elif os.path.exists(p):
+ os.unlink(p)
+
+ adios(distinfo_path)
+
+ if not os.path.exists(egginfo_path):
+ # There is no egg-info. This is probably because the egg-info
+ # file/directory is not named matching the distribution name used
+ # to name the archive file. Check for this case and report
+ # accordingly.
+ import glob
+ pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
+ possible = glob.glob(pat)
+ err = "Egg metadata expected at %s but not found" % (egginfo_path,)
+ if possible:
+ alt = os.path.basename(possible[0])
+ err += " (%s found - possible misnamed archive file?)" % (alt,)
+
+ raise ValueError(err)
+
+ if os.path.isfile(egginfo_path):
+ # .egg-info is a single file
+ pkginfo_path = egginfo_path
+ pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
+ os.mkdir(distinfo_path)
+ else:
+ # .egg-info is a directory
+ pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
+ pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
+
+ # ignore common egg metadata that is useless to wheel
+ shutil.copytree(egginfo_path, distinfo_path,
+ ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
+ 'not-zip-safe'}
+ )
+
+ # delete dependency_links if it is only whitespace
+ dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
+ with open(dependency_links_path, 'r') as dependency_links_file:
+ dependency_links = dependency_links_file.read().strip()
+ if not dependency_links:
+ adios(dependency_links_path)
+
+ write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
+
+ for license_path in self.license_paths:
+ filename = os.path.basename(license_path)
+ shutil.copy(license_path, os.path.join(distinfo_path, filename))
+
+ adios(egginfo_path)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/macosx_libfile.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/macosx_libfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..8918039fcce85af9e60d08066580d548aa17c433
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/macosx_libfile.py
@@ -0,0 +1,428 @@
+"""
+This module contains function to analyse dynamic library
+headers to extract system information
+
+Currently only for MacOSX
+
+A library file on a macOS system starts with either a Mach-O or a fat header.
+The two can be distinguished by the first 32 bits, called the magic number.
+The proper value of the magic number carries the suffix _MAGIC; the suffix
+_CIGAM means reversed byte order.
+Both header kinds come in two variants: 32-bit and 64-bit.
+
+A fat header indicates that the library contains several versions of the
+library (typically for different architectures). It records the offsets at
+which the Mach-O headers start.
+
+Each section introduced by a Mach-O header contains one library
+(so if the file starts with a Mach-O header it contains only one version).
+
+After the Mach-O header there are load-command fields.
+Each of them starts with two fields:
+cmd - magic number for this command
+cmdsize - total size occupied by this section information.
+
+In this case only the commands LC_VERSION_MIN_MACOSX (for macOS 10.13 and earlier)
+and LC_BUILD_VERSION (for macOS 10.14 and newer) are interesting,
+because they contain information about the minimal system version.
+
+Important remarks:
+- For fat files this implementation looks for the maximum version number.
+  It does not check whether a slice is 32- or 64-bit and does not compare it
+  with the package currently being built, so it may falsely report a higher
+  version than needed.
+- All structure signatures are taken from the macOS header files.
+- The binary format should be more stable than `otool` output;
+  if Apple introduces changes, both implementations would need updating.
+- The system compile will set the deployment target no lower than
+ 11.0 for arm64 builds. For "Universal 2" builds use the x86_64 deployment
+ target when the arm64 target is 11.0.
+"""
+
+import ctypes
+import os
+import sys
+
+"""here the needed const and struct from mach-o header files"""
+
# Magic numbers identifying fat (multi-architecture) and Mach-O files.
# _MAGIC is the native-order value; _CIGAM is the same value byte-swapped.
FAT_MAGIC = 0xcafebabe
FAT_CIGAM = 0xbebafeca
FAT_MAGIC_64 = 0xcafebabf
FAT_CIGAM_64 = 0xbfbafeca
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe

# Load-command identifiers carrying the minimum-OS-version information.
LC_VERSION_MIN_MACOSX = 0x24
LC_BUILD_VERSION = 0x32

# cputype value for 64-bit ARM slices in a fat binary.
CPU_TYPE_ARM64 = 0x0100000c

# ctypes field layouts mirroring the C structs quoted below each table.
mach_header_fields = [
    ("magic", ctypes.c_uint32), ("cputype", ctypes.c_int),
    ("cpusubtype", ctypes.c_int), ("filetype", ctypes.c_uint32),
    ("ncmds", ctypes.c_uint32), ("sizeofcmds", ctypes.c_uint32),
    ("flags", ctypes.c_uint32)
    ]
"""
struct mach_header {
    uint32_t magic; /* mach magic number identifier */
    cpu_type_t cputype; /* cpu specifier */
    cpu_subtype_t cpusubtype; /* machine specifier */
    uint32_t filetype; /* type of file */
    uint32_t ncmds; /* number of load commands */
    uint32_t sizeofcmds; /* the size of all the load commands */
    uint32_t flags; /* flags */
};
typedef integer_t cpu_type_t;
typedef integer_t cpu_subtype_t;
"""

mach_header_fields_64 = mach_header_fields + [("reserved", ctypes.c_uint32)]
"""
struct mach_header_64 {
    uint32_t magic; /* mach magic number identifier */
    cpu_type_t cputype; /* cpu specifier */
    cpu_subtype_t cpusubtype; /* machine specifier */
    uint32_t filetype; /* type of file */
    uint32_t ncmds; /* number of load commands */
    uint32_t sizeofcmds; /* the size of all the load commands */
    uint32_t flags; /* flags */
    uint32_t reserved; /* reserved */
};
"""

fat_header_fields = [("magic", ctypes.c_uint32), ("nfat_arch", ctypes.c_uint32)]
"""
struct fat_header {
    uint32_t magic; /* FAT_MAGIC or FAT_MAGIC_64 */
    uint32_t nfat_arch; /* number of structs that follow */
};
"""

fat_arch_fields = [
    ("cputype", ctypes.c_int), ("cpusubtype", ctypes.c_int),
    ("offset", ctypes.c_uint32), ("size", ctypes.c_uint32),
    ("align", ctypes.c_uint32)
]
"""
struct fat_arch {
    cpu_type_t cputype; /* cpu specifier (int) */
    cpu_subtype_t cpusubtype; /* machine specifier (int) */
    uint32_t offset; /* file offset to this object file */
    uint32_t size; /* size of this object file */
    uint32_t align; /* alignment as a power of 2 */
};
"""

fat_arch_64_fields = [
    ("cputype", ctypes.c_int), ("cpusubtype", ctypes.c_int),
    ("offset", ctypes.c_uint64), ("size", ctypes.c_uint64),
    ("align", ctypes.c_uint32), ("reserved", ctypes.c_uint32)
]
"""
struct fat_arch_64 {
    cpu_type_t cputype; /* cpu specifier (int) */
    cpu_subtype_t cpusubtype; /* machine specifier (int) */
    uint64_t offset; /* file offset to this object file */
    uint64_t size; /* size of this object file */
    uint32_t align; /* alignment as a power of 2 */
    uint32_t reserved; /* reserved */
};
"""

# Common prefix of every load command: its id and its total size.
segment_base_fields = [("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32)]
"""base for reading segment info"""

segment_command_fields = [
    ("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32),
    ("segname", ctypes.c_char * 16), ("vmaddr", ctypes.c_uint32),
    ("vmsize", ctypes.c_uint32), ("fileoff", ctypes.c_uint32),
    ("filesize", ctypes.c_uint32), ("maxprot", ctypes.c_int),
    ("initprot", ctypes.c_int), ("nsects", ctypes.c_uint32),
    ("flags", ctypes.c_uint32),
    ]
"""
struct segment_command { /* for 32-bit architectures */
    uint32_t cmd; /* LC_SEGMENT */
    uint32_t cmdsize; /* includes sizeof section structs */
    char segname[16]; /* segment name */
    uint32_t vmaddr; /* memory address of this segment */
    uint32_t vmsize; /* memory size of this segment */
    uint32_t fileoff; /* file offset of this segment */
    uint32_t filesize; /* amount to map from the file */
    vm_prot_t maxprot; /* maximum VM protection */
    vm_prot_t initprot; /* initial VM protection */
    uint32_t nsects; /* number of sections in segment */
    uint32_t flags; /* flags */
};
typedef int vm_prot_t;
"""

segment_command_fields_64 = [
    ("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32),
    ("segname", ctypes.c_char * 16), ("vmaddr", ctypes.c_uint64),
    ("vmsize", ctypes.c_uint64), ("fileoff", ctypes.c_uint64),
    ("filesize", ctypes.c_uint64), ("maxprot", ctypes.c_int),
    ("initprot", ctypes.c_int), ("nsects", ctypes.c_uint32),
    ("flags", ctypes.c_uint32),
    ]
"""
struct segment_command_64 { /* for 64-bit architectures */
    uint32_t cmd; /* LC_SEGMENT_64 */
    uint32_t cmdsize; /* includes sizeof section_64 structs */
    char segname[16]; /* segment name */
    uint64_t vmaddr; /* memory address of this segment */
    uint64_t vmsize; /* memory size of this segment */
    uint64_t fileoff; /* file offset of this segment */
    uint64_t filesize; /* amount to map from the file */
    vm_prot_t maxprot; /* maximum VM protection */
    vm_prot_t initprot; /* initial VM protection */
    uint32_t nsects; /* number of sections in segment */
    uint32_t flags; /* flags */
};
"""

version_min_command_fields = segment_base_fields + \
    [("version", ctypes.c_uint32), ("sdk", ctypes.c_uint32)]
"""
struct version_min_command {
    uint32_t cmd; /* LC_VERSION_MIN_MACOSX or
                   LC_VERSION_MIN_IPHONEOS or
                   LC_VERSION_MIN_WATCHOS or
                   LC_VERSION_MIN_TVOS */
    uint32_t cmdsize; /* sizeof(struct min_version_command) */
    uint32_t version; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
    uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
};
"""

build_version_command_fields = segment_base_fields + \
    [("platform", ctypes.c_uint32), ("minos", ctypes.c_uint32),
     ("sdk", ctypes.c_uint32), ("ntools", ctypes.c_uint32)]
"""
struct build_version_command {
    uint32_t cmd; /* LC_BUILD_VERSION */
    uint32_t cmdsize; /* sizeof(struct build_version_command) plus */
                      /* ntools * sizeof(struct build_tool_version) */
    uint32_t platform; /* platform */
    uint32_t minos; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
    uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
    uint32_t ntools; /* number of tool entries following this */
};
"""
+
+
def swap32(x):
    """Reverse the byte order of a 32-bit unsigned integer."""
    byte0 = (x >> 24) & 0xFF
    byte1 = (x >> 16) & 0xFF
    byte2 = (x >> 8) & 0xFF
    byte3 = x & 0xFF
    return (byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0
+
+
def get_base_class_and_magic_number(lib_file, seek=None):
    """Read the 32-bit magic number at *seek* (default: current position).

    Returns ``(BaseClass, magic_number)`` where ``BaseClass`` is the ctypes
    structure base class matching the file's byte order and ``magic_number``
    is normalized to its native (_MAGIC) form.  The file position is
    restored to *seek* before returning.
    """
    if seek is None:
        seek = lib_file.tell()
    else:
        lib_file.seek(seek)

    raw = lib_file.read(ctypes.sizeof(ctypes.c_uint32))
    magic_number = ctypes.c_uint32.from_buffer_copy(raw).value

    # A reversed (_CIGAM) magic means the file uses the opposite byte order
    # to the host: parse all subsequent structures with swapped endianness.
    if magic_number in (FAT_CIGAM, FAT_CIGAM_64, MH_CIGAM, MH_CIGAM_64):
        if sys.byteorder == "little":
            BaseClass = ctypes.BigEndianStructure
        else:
            BaseClass = ctypes.LittleEndianStructure
        magic_number = swap32(magic_number)
    else:
        BaseClass = ctypes.Structure

    lib_file.seek(seek)
    return BaseClass, magic_number
+
+
def read_data(struct_class, lib_file):
    """Read exactly one *struct_class* record from the current position of *lib_file*."""
    raw = lib_file.read(ctypes.sizeof(struct_class))
    return struct_class.from_buffer_copy(raw)
+
+
def extract_macosx_min_system_version(path_to_lib):
    """Return the minimal macOS version required by the library at *path_to_lib*.

    :param path_to_lib: path to a .dylib/.so file
    :return: an (x, y, z) version tuple, or None when it cannot be determined
        (not a Mach-O/fat file, or no version load command found)
    """
    with open(path_to_lib, "rb") as lib_file:
        BaseClass, magic_number = get_base_class_and_magic_number(lib_file, 0)
        if magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]:
            return

        # BUGFIX: get_base_class_and_magic_number() already normalizes the
        # reversed (_CIGAM) magics to their _MAGIC forms, so FAT_CIGAM_64 can
        # never appear here.  Compare against FAT_MAGIC_64 instead, otherwise
        # 64-bit fat binaries fall through to the Mach-O branch and are
        # misparsed.
        if magic_number in [FAT_MAGIC, FAT_MAGIC_64]:
            class FatHeader(BaseClass):
                _fields_ = fat_header_fields

            fat_header = read_data(FatHeader, lib_file)
            if magic_number == FAT_MAGIC:

                class FatArch(BaseClass):
                    _fields_ = fat_arch_fields
            else:

                class FatArch(BaseClass):
                    _fields_ = fat_arch_64_fields

            fat_arch_list = [read_data(FatArch, lib_file) for _ in range(fat_header.nfat_arch)]

            versions_list = []
            for el in fat_arch_list:
                try:
                    version = read_mach_header(lib_file, el.offset)
                    if version is not None:
                        if el.cputype == CPU_TYPE_ARM64 and len(fat_arch_list) != 1:
                            # Xcode will not set the deployment target below 11.0.0
                            # for the arm64 architecture. Ignore the arm64 deployment
                            # in fat binaries when the target is 11.0.0, that way
                            # the other architectures can select a lower deployment
                            # target.
                            # This is safe because there is no arm64 variant for
                            # macOS 10.15 or earlier.
                            if version == (11, 0, 0):
                                continue
                        versions_list.append(version)
                except ValueError:
                    pass

            # Report the highest requirement among all slices (see module notes).
            if len(versions_list) > 0:
                return max(versions_list)
            else:
                return None

        else:
            try:
                return read_mach_header(lib_file, 0)
            except ValueError:
                # Malformed Mach-O data: report "unknown" rather than fail.
                return None
+
+
def read_mach_header(lib_file, seek=None):
    """
    Parse a Mach-O header and extract the minimal macOS version required.

    Returns an (x, y, z) tuple from the LC_VERSION_MIN_MACOSX or
    LC_BUILD_VERSION load command, or None when neither command is present.

    :param lib_file: reference to opened library file with pointer
    :param seek: optional absolute offset of the Mach-O header; when None,
        parsing starts at the current file position
    """
    if seek is not None:
        lib_file.seek(seek)
    base_class, magic_number = get_base_class_and_magic_number(lib_file)
    # Only MH_MAGIC is 32-bit; any other magic accepted upstream is 64-bit.
    arch = "32" if magic_number == MH_MAGIC else "64"

    class SegmentBase(base_class):
        _fields_ = segment_base_fields

    if arch == "32":

        class MachHeader(base_class):
            _fields_ = mach_header_fields

    else:

        class MachHeader(base_class):
            _fields_ = mach_header_fields_64

    mach_header = read_data(MachHeader, lib_file)
    for _i in range(mach_header.ncmds):
        # Remember the command start so it can be re-read with the proper
        # structure, or skipped entirely using cmdsize.
        pos = lib_file.tell()
        segment_base = read_data(SegmentBase, lib_file)
        lib_file.seek(pos)
        if segment_base.cmd == LC_VERSION_MIN_MACOSX:
            class VersionMinCommand(base_class):
                _fields_ = version_min_command_fields

            version_info = read_data(VersionMinCommand, lib_file)
            return parse_version(version_info.version)
        elif segment_base.cmd == LC_BUILD_VERSION:
            class VersionBuild(base_class):
                _fields_ = build_version_command_fields

            version_info = read_data(VersionBuild, lib_file)
            return parse_version(version_info.minos)
        else:
            lib_file.seek(pos + segment_base.cmdsize)
            continue
+
+
def parse_version(version):
    """Decode a nibble-packed xxxx.yy.zz version word into an (x, y, z) tuple."""
    major = (version & 0xFFFF0000) >> 16
    minor = (version & 0x0000FF00) >> 8
    patch = version & 0x000000FF
    return major, minor, patch
+
+
def calculate_macosx_platform_tag(archive_root, platform_tag):
    """
    Calculate proper macosx platform tag basing on files which are included to wheel

    Example platform tag `macosx-10.14-x86_64`

    :param archive_root: directory tree scanned for .dylib/.so files
    :param platform_tag: the dash-separated tag to adjust, e.g. "macosx-10.9-x86_64"
    :return: the underscore-joined tag raised to the highest version required
        by any shared library found under *archive_root*
    """
    prefix, base_version, suffix = platform_tag.split('-')
    base_version = tuple([int(x) for x in base_version.split(".")])
    base_version = base_version[:2]
    if base_version[0] > 10:
        # From macOS 11 on, wheels are tagged by major version only (11.0, 12.0, ...).
        base_version = (base_version[0], 0)
    assert len(base_version) == 2
    if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
        deploy_target = tuple([int(x) for x in os.environ[
            "MACOSX_DEPLOYMENT_TARGET"].split(".")])
        deploy_target = deploy_target[:2]
        if deploy_target[0] > 10:
            deploy_target = (deploy_target[0], 0)
        if deploy_target < base_version:
            sys.stderr.write(
                "[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than the "
                "version on which the Python interpreter was compiled ({}), and will be "
                "ignored.\n".format('.'.join(str(x) for x in deploy_target),
                                    '.'.join(str(x) for x in base_version))
            )
        else:
            base_version = deploy_target

    assert len(base_version) == 2
    start_version = base_version
    versions_dict = {}
    # Scan every shared library in the archive for its minimum macOS version.
    for (dirpath, dirnames, filenames) in os.walk(archive_root):
        for filename in filenames:
            if filename.endswith('.dylib') or filename.endswith('.so'):
                lib_path = os.path.join(dirpath, filename)
                min_ver = extract_macosx_min_system_version(lib_path)
                if min_ver is not None:
                    min_ver = min_ver[0:2]
                    if min_ver[0] > 10:
                        min_ver = (min_ver[0], 0)
                    versions_dict[lib_path] = min_ver

    if len(versions_dict) > 0:
        base_version = max(base_version, max(versions_dict.values()))

    # macosx platform tag do not support minor bugfix release
    fin_base_version = "_".join([str(x) for x in base_version])
    if start_version < base_version:
        problematic_files = [k for k, v in versions_dict.items() if v > start_version]
        # BUGFIX: count the offending files *before* flattening the list into
        # a single string; checking len() of the joined string counted
        # characters, so "this file" was essentially never chosen correctly.
        if len(problematic_files) == 1:
            files_form = "this file"
        else:
            files_form = "these files"
        problematic_files = "\n".join(problematic_files)
        error_message = \
            "[WARNING] This wheel needs a higher macOS version than {} " \
            "To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least " +\
            fin_base_version + " or recreate " + files_form + " with lower " \
            "MACOSX_DEPLOYMENT_TARGET: \n" + problematic_files

        if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
            error_message = error_message.format("is set in MACOSX_DEPLOYMENT_TARGET variable.")
        else:
            error_message = error_message.format(
                "the version your Python interpreter is compiled against.")

        sys.stderr.write(error_message)

    platform_tag = prefix + "_" + fin_base_version + "_" + suffix
    return platform_tag
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/metadata.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..37efa74307ce654cc478bc9a065fb3b96bfb41d7
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/metadata.py
@@ -0,0 +1,133 @@
+"""
+Tools for converting old- to new-style metadata.
+"""
+
+import os.path
+import textwrap
+
+import pkg_resources
+
+from .pkginfo import read_pkg_info
+
+
def requires_to_requires_dist(requirement):
    """Return the version specifier for a requirement in PEP 345/566 fashion."""
    # A direct URL reference replaces any version specifier.
    if getattr(requirement, 'url', None):
        return " @ " + requirement.url

    specifiers = sorted(op + version for op, version in requirement.specs)
    if specifiers:
        return " (%s)" % ','.join(specifiers)
    return ''
+
+
def convert_requirements(requirements):
    """Yield Requires-Dist: strings for parsed requirements strings."""
    for req_string in requirements:
        parsed = pkg_resources.Requirement.parse(req_string)
        spec = requires_to_requires_dist(parsed)
        extras = ",".join(sorted(parsed.extras))
        if extras:
            extras = "[%s]" % extras
        yield parsed.project_name + extras + spec
+
+
def generate_requirements(extras_require):
    """
    Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
    and ('Provides-Extra', 'extra') tuples.

    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
    using the empty extra {'': [requirements]} to hold install_requires.
    """
    for extra, depends in extras_require.items():
        condition = ''
        # The install_requires bucket may use None as its key; normalize to ''.
        extra = extra or ''
        if ':' in extra:  # setuptools extra:condition syntax
            extra, condition = extra.split(':', 1)

        extra = pkg_resources.safe_extra(extra)
        if extra:
            yield 'Provides-Extra', extra
            if condition:
                # Combine an explicit environment marker with the extra marker.
                condition = "(" + condition + ") and "
            condition += "extra == '%s'" % extra

        if condition:
            condition = ' ; ' + condition

        for new_req in convert_requirements(depends):
            yield 'Requires-Dist', new_req + condition
+
+
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format

    :param egg_info_path: path of the .egg-info directory (used to locate requires.txt)
    :param pkginfo_path: path of the PKG-INFO file to read
    :return: the updated message object returned by read_pkg_info
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.1')
    # Those will be regenerated from `requires.txt`.
    del pkg_info['Provides-Extra']
    del pkg_info['Requires-Dist']
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()

        # 'x[0] or ""' sorts the unnamed (install_requires) section first.
        parsed_requirements = sorted(pkg_resources.split_sections(requires),
                                     key=lambda x: x[0] or '')
        for extra, reqs in parsed_requirements:
            for key, value in generate_requirements({extra: reqs}):
                # Avoid emitting duplicate header lines.
                if (key, value) not in pkg_info.items():
                    pkg_info[key] = value

    description = pkg_info['Description']
    if description:
        # Move the long description from a header into the message payload.
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
+
+
def pkginfo_unicode(pkg_info, field):
    """Hack to coax Unicode out of an email Message() - Python 3.3+"""
    text = pkg_info[field]
    if isinstance(text, str):
        return text

    # The header value is not a plain str; locate the raw header of the same
    # name and re-decode its surrogate-escaped bytes as UTF-8.
    wanted = field.lower()
    for name, value in pkg_info.raw_items():
        if name.lower() == wanted:
            text = value.encode('ascii', 'surrogateescape').decode('utf-8')
            break

    return text
+
+
def dedent_description(pkg_info):
    """
    Dedent and convert pkg_info['Description'] to Unicode.
    """
    description = pkg_info['Description']

    # Python 3 Unicode handling, sorta: a non-str value means the header
    # carries surrogate-escaped bytes that must be round-tripped.
    surrogates = not isinstance(description, str)
    if surrogates:
        description = pkginfo_unicode(pkg_info, 'Description')

    lines = description.splitlines()
    # If the first line of long_description is blank, the first line here
    # will be indented; keep it as-is (lstripped) and dedent only the rest.
    description_dedent = '\n'.join(
        (lines[0].lstrip(),
         textwrap.dedent('\n'.join(lines[1:])),
         '\n'))

    if surrogates:
        description_dedent = description_dedent \
            .encode("utf8") \
            .decode("ascii", "surrogateescape")

    return description_dedent
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/util.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ae2b4457ca50752977907ec3cd3c0b7d27042ac
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/util.py
@@ -0,0 +1,46 @@
+import base64
+import io
+import sys
+
+
# Python 2/3 compatibility shims: the text type, the in-memory text stream
# type, and a "native string" converter (bytes on 2, str on 3).
if sys.version_info[0] < 3:
    text_type = unicode  # noqa: F821

    StringIO = io.BytesIO

    def native(s, encoding='utf-8'):
        # On Python 2 the native string type is bytes.
        if isinstance(s, unicode):  # noqa: F821
            return s.encode(encoding)
        return s
else:
    text_type = str

    StringIO = io.StringIO

    def native(s, encoding='utf-8'):
        # On Python 3 the native string type is str.
        if isinstance(s, bytes):
            return s.decode(encoding)
        return s
+
+
def urlsafe_b64encode(data):
    """urlsafe_b64encode without padding"""
    encoded = base64.urlsafe_b64encode(data)
    return encoded.rstrip(b'=')
+
+
+def urlsafe_b64decode(data):
+ """urlsafe_b64decode without padding"""
+ pad = b'=' * (4 - (len(data) & 3))
+ return base64.urlsafe_b64decode(data + pad)
+
+
def as_unicode(s):
    """Return *s* as text, decoding bytes as UTF-8."""
    return s.decode('utf-8') if isinstance(s, bytes) else s
+
+
def as_bytes(s):
    """Return *s* as bytes, encoding text as UTF-8."""
    return s.encode('utf-8') if isinstance(s, text_type) else s
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/wheelfile.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/wheelfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ee97dddd234324a86f8daffad01b8d83b5146d9
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/wheelfile.py
@@ -0,0 +1,169 @@
+from __future__ import print_function
+
+import csv
+import hashlib
+import os.path
+import re
+import stat
+import time
+from collections import OrderedDict
+from distutils import log as logger
+from zipfile import ZIP_DEFLATED, ZipInfo, ZipFile
+
+from wheel.cli import WheelError
+from wheel.util import urlsafe_b64decode, as_unicode, native, urlsafe_b64encode, as_bytes, StringIO
+
# Non-greedy matching of an optional build number may be too clever (more
# invalid wheel filenames will match). Separate regex for .dist-info?
# NOTE: the named groups (namever/name/ver/build/pyver/abi/plat) had been
# stripped from this pattern, making it an invalid regex; restored per the
# PEP 427 wheel filename convention. 'namever' is used to build the
# .dist-info path below.
WHEEL_INFO_RE = re.compile(
    r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.+?))(-(?P<build>\d[^-]*))?
     -(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\.whl$""",
    re.VERBOSE)
+
+
def get_zipinfo_datetime(timestamp=None):
    # Some applications need reproducible .whl files, but they can't do this
    # without forcing the timestamp of the individual ZipInfo objects. See
    # issue #143.  SOURCE_DATE_EPOCH, when set, overrides any timestamp.
    chosen = os.environ.get('SOURCE_DATE_EPOCH', timestamp or time.time())
    return time.gmtime(int(chosen))[0:6]
+
+
class WheelFile(ZipFile):
    """A ZipFile derivative class that also reads SHA-256 hashes from
    .dist-info/RECORD and checks any read files against those.
    """

    # Hash algorithm used for entries newly written to RECORD.
    _default_algorithm = hashlib.sha256

    def __init__(self, file, mode='r', compression=ZIP_DEFLATED):
        # The wheel filename encodes name/version/tags; reject anything else.
        basename = os.path.basename(file)
        self.parsed_filename = WHEEL_INFO_RE.match(basename)
        if not basename.endswith('.whl') or self.parsed_filename is None:
            raise WheelError("Bad wheel filename {!r}".format(basename))

        ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True)

        self.dist_info_path = '{}.dist-info'.format(self.parsed_filename.group('namever'))
        self.record_path = self.dist_info_path + '/RECORD'
        # arcname -> (algorithm, digest); (None, None) marks entries that are
        # deliberately exempt from hash verification.
        self._file_hashes = OrderedDict()
        self._file_sizes = {}
        if mode == 'r':
            # Ignore RECORD and any embedded wheel signatures
            self._file_hashes[self.record_path] = None, None
            self._file_hashes[self.record_path + '.jws'] = None, None
            self._file_hashes[self.record_path + '.p7s'] = None, None

            # Fill in the expected hashes by reading them from RECORD
            try:
                record = self.open(self.record_path)
            except KeyError:
                raise WheelError('Missing {} file'.format(self.record_path))

            with record:
                for line in record:
                    line = line.decode('utf-8')
                    # rsplit: the path itself may contain commas.
                    path, hash_sum, size = line.rsplit(u',', 2)
                    if hash_sum:
                        algorithm, hash_sum = hash_sum.split(u'=')
                        try:
                            hashlib.new(algorithm)
                        except ValueError:
                            raise WheelError('Unsupported hash algorithm: {}'.format(algorithm))

                        if algorithm.lower() in {'md5', 'sha1'}:
                            raise WheelError(
                                'Weak hash algorithm ({}) is not permitted by PEP 427'
                                .format(algorithm))

                        self._file_hashes[path] = (
                            algorithm, urlsafe_b64decode(hash_sum.encode('ascii')))

    def open(self, name_or_info, mode="r", pwd=None):
        # Replacement for ZipExtFile._update_crc: feed every chunk read to a
        # running hash and compare it against the RECORD digest at EOF.
        def _update_crc(newdata, eof=None):
            if eof is None:
                eof = ef._eof
                update_crc_orig(newdata)
            else:  # Python 2
                update_crc_orig(newdata, eof)

            running_hash.update(newdata)
            if eof and running_hash.digest() != expected_hash:
                raise WheelError("Hash mismatch for file '{}'".format(native(ef_name)))

        ef_name = as_unicode(name_or_info.filename if isinstance(name_or_info, ZipInfo)
                             else name_or_info)
        # Any regular member opened for reading must be listed in RECORD.
        if mode == 'r' and not ef_name.endswith('/') and ef_name not in self._file_hashes:
            raise WheelError("No hash found for file '{}'".format(native(ef_name)))

        ef = ZipFile.open(self, name_or_info, mode, pwd)
        if mode == 'r' and not ef_name.endswith('/'):
            algorithm, expected_hash = self._file_hashes[ef_name]
            if expected_hash is not None:
                # Monkey patch the _update_crc method to also check for the hash from RECORD
                running_hash = hashlib.new(algorithm)
                update_crc_orig, ef._update_crc = ef._update_crc, _update_crc

        return ef

    def write_files(self, base_dir):
        """Add the tree under base_dir, writing .dist-info members last."""
        logger.info("creating '%s' and adding '%s' to it", self.filename, base_dir)
        deferred = []
        for root, dirnames, filenames in os.walk(base_dir):
            # Sort the directory names so that `os.walk` will walk them in a
            # defined order on the next iteration.
            dirnames.sort()
            for name in sorted(filenames):
                path = os.path.normpath(os.path.join(root, name))
                if os.path.isfile(path):
                    arcname = os.path.relpath(path, base_dir).replace(os.path.sep, '/')
                    if arcname == self.record_path:
                        # RECORD is regenerated in close(); skip any existing copy.
                        pass
                    elif root.endswith('.dist-info'):
                        deferred.append((path, arcname))
                    else:
                        self.write(path, arcname)

        deferred.sort()
        for path, arcname in deferred:
            self.write(path, arcname)

    def write(self, filename, arcname=None, compress_type=None):
        """Add a file, preserving its permission bits and mtime in the entry."""
        with open(filename, 'rb') as f:
            st = os.fstat(f.fileno())
            data = f.read()

        zinfo = ZipInfo(arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime))
        # High 16 bits of external_attr carry the Unix mode/file-type bits.
        zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
        zinfo.compress_type = compress_type or self.compression
        self.writestr(zinfo, data, compress_type)

    def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
        # Also accumulate the hash/size of the written data for RECORD.
        ZipFile.writestr(self, zinfo_or_arcname, bytes, compress_type)
        fname = (zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo)
                 else zinfo_or_arcname)
        logger.info("adding '%s'", fname)
        if fname != self.record_path:
            hash_ = self._default_algorithm(bytes)
            self._file_hashes[fname] = hash_.name, native(urlsafe_b64encode(hash_.digest()))
            self._file_sizes[fname] = len(bytes)

    def close(self):
        # Write RECORD
        if self.fp is not None and self.mode == 'w' and self._file_hashes:
            data = StringIO()
            writer = csv.writer(data, delimiter=',', quotechar='"', lineterminator='\n')
            writer.writerows((
                (
                    fname,
                    algorithm + "=" + hash_,
                    self._file_sizes[fname]
                )
                for fname, (algorithm, hash_) in self._file_hashes.items()
            ))
            # RECORD lists itself with empty hash and size fields.
            writer.writerow((format(self.record_path), "", ""))
            zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
            zinfo.compress_type = self.compression
            zinfo.external_attr = 0o664 << 16
            self.writestr(zinfo, as_bytes(data.getvalue()))

        ZipFile.close(self)
diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/ld_impl_linux-64-2.35.1-h7274673_9.conda b/my_container_sandbox/workspace/anaconda3/pkgs/ld_impl_linux-64-2.35.1-h7274673_9.conda
new file mode 100644
index 0000000000000000000000000000000000000000..65eeaadb0603c62ebbdc083ee5542dc4e1f72a4d
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/pkgs/ld_impl_linux-64-2.35.1-h7274673_9.conda
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc5b453d585754bb9d52bac9e2045f32eb0fc43d74ec3eee21e44604cf1a6485
+size 600155
diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/openssl-1.1.1k-h27cfd23_0.conda b/my_container_sandbox/workspace/anaconda3/pkgs/openssl-1.1.1k-h27cfd23_0.conda
new file mode 100644
index 0000000000000000000000000000000000000000..b587d5b20fda621258aa3b2f697a4f3e4ff88151
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/pkgs/openssl-1.1.1k-h27cfd23_0.conda
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd9ff7c11ee7690256772eeae1cfebb63d8643c8329ff489ec93f64863e7b421
+size 2657446
diff --git a/my_container_sandbox/workspace/anaconda3/pkgs/readline-8.1-h27cfd23_0.conda b/my_container_sandbox/workspace/anaconda3/pkgs/readline-8.1-h27cfd23_0.conda
new file mode 100644
index 0000000000000000000000000000000000000000..bc15931679b1cd859dd1a11428925e0209a260d2
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/pkgs/readline-8.1-h27cfd23_0.conda
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fe35471214d5fc544fa4c37808a6ffbd294d8f6d3ef5c484f2f72d7493e8d1d
+size 371059
diff --git a/tmp_inputs_4_3/case00007.nii.gz b/tmp_inputs_4_3/case00007.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..a8c7fdb833b1211a5967e71015065bd52be7ee3a
--- /dev/null
+++ b/tmp_inputs_4_3/case00007.nii.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453716882149fec5014bdc3a2e7dd0bcdd00c49da99942e5ce955f85aa00b0c2
+size 33253044