diff --git a/.gitattributes b/.gitattributes index 308365b51190ee3605b046cae6ff4493aadf2083..336a200d8b6e9bd02a1c27eb7ce4ec3cd158ee78 100644 --- a/.gitattributes +++ b/.gitattributes @@ -370,3 +370,7 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor my_container_sandbox/workspace/anaconda3/lib/libnppist.so.11.3.3.95 filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/pyparsing/__pycache__/core.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/__pycache__/__init__.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5931604af71ab88257f2658bc8e5f30ede5c4b6a Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..124cc97838ae2ed18a999f1709422aa7d23533fc Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..340a9ab76604f50ef5b44ec58db7a75c6b7fc583 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89c9e2270c1e5b5aafb94125a47689dd673ac7cb Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe15dadc610a15c9556745a6150ab67020ac1497 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53ea249b76659382ced3c7531083c0450a0934fa Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1abe0a4a7faacde5140d5631dfe48a79325cd4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py @@ -0,0 +1,132 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ +from antlr4.IntervalSet import IntervalSet + +from antlr4.RuleContext import RuleContext + +from antlr4.Token import Token +from antlr4.atn.ATNType import ATNType +from antlr4.atn.ATNState import ATNState, DecisionState + + +class ATN(object): + __slots__ = ( + 'grammarType', 'maxTokenType', 'states', 'decisionToState', + 'ruleToStartState', 'ruleToStopState', 'modeNameToStartState', + 'ruleToTokenType', 'lexerActions', 'modeToStartState' + ) + + INVALID_ALT_NUMBER = 0 + + # Used for runtime deserialization of ATNs from strings#/ + def __init__(self, grammarType:ATNType , maxTokenType:int ): + # The type of the ATN. + self.grammarType = grammarType + # The maximum value for any symbol recognized by a transition in the ATN. + self.maxTokenType = maxTokenType + self.states = [] + # Each subrule/rule is a decision point and we must track them so we + # can go back later and build DFA predictors for them. This includes + # all the rules, subrules, optional blocks, ()+, ()* etc... + self.decisionToState = [] + # Maps from rule index to starting state number. + self.ruleToStartState = [] + # Maps from rule index to stop state number. + self.ruleToStopState = None + self.modeNameToStartState = dict() + # For lexer ATNs, this maps the rule index to the resulting token type. + # For parser ATNs, this maps the rule index to the generated bypass token + # type if the + # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions} + # deserialization option was specified; otherwise, this is {@code null}. + self.ruleToTokenType = None + # For lexer ATNs, this is an array of {@link LexerAction} objects which may + # be referenced by action transitions in the ATN. + self.lexerActions = None + self.modeToStartState = [] + + # Compute the set of valid tokens that can occur starting in state {@code s}. + # If {@code ctx} is null, the set of tokens will not include what can follow + # the rule surrounding {@code s}. 
In other words, the set will be + # restricted to tokens reachable staying within {@code s}'s rule. + def nextTokensInContext(self, s:ATNState, ctx:RuleContext): + from antlr4.LL1Analyzer import LL1Analyzer + anal = LL1Analyzer(self) + return anal.LOOK(s, ctx=ctx) + + # Compute the set of valid tokens that can occur starting in {@code s} and + # staying in same rule. {@link Token#EPSILON} is in set if we reach end of + # rule. + def nextTokensNoContext(self, s:ATNState): + if s.nextTokenWithinRule is not None: + return s.nextTokenWithinRule + s.nextTokenWithinRule = self.nextTokensInContext(s, None) + s.nextTokenWithinRule.readonly = True + return s.nextTokenWithinRule + + def nextTokens(self, s:ATNState, ctx:RuleContext = None): + if ctx==None: + return self.nextTokensNoContext(s) + else: + return self.nextTokensInContext(s, ctx) + + def addState(self, state:ATNState): + if state is not None: + state.atn = self + state.stateNumber = len(self.states) + self.states.append(state) + + def removeState(self, state:ATNState): + self.states[state.stateNumber] = None # just free mem, don't shift states in list + + def defineDecisionState(self, s:DecisionState): + self.decisionToState.append(s) + s.decision = len(self.decisionToState)-1 + return s.decision + + def getDecisionState(self, decision:int): + if len(self.decisionToState)==0: + return None + else: + return self.decisionToState[decision] + + # Computes the set of input symbols which could follow ATN state number + # {@code stateNumber} in the specified full {@code context}. This method + # considers the complete parser context, but does not evaluate semantic + # predicates (i.e. all predicates encountered during the calculation are + # assumed true). If a path in the ATN exists from the starting state to the + # {@link RuleStopState} of the outermost context without matching any + # symbols, {@link Token#EOF} is added to the returned set. + # + #
If {@code context} is {@code null}, it is treated as + # {@link ParserRuleContext#EMPTY}.
+ # + # @param stateNumber the ATN state number + # @param context the full parse context + # @return The set of potentially valid input symbols which could follow the + # specified state in the specified context. + # @throws IllegalArgumentException if the ATN does not contain a state with + # number {@code stateNumber} + #/ + def getExpectedTokens(self, stateNumber:int, ctx:RuleContext ): + if stateNumber < 0 or stateNumber >= len(self.states): + raise Exception("Invalid state number.") + s = self.states[stateNumber] + following = self.nextTokens(s) + if Token.EPSILON not in following: + return following + expected = IntervalSet() + expected.addSet(following) + expected.removeOne(Token.EPSILON) + while (ctx != None and ctx.invokingState >= 0 and Token.EPSILON in following): + invokingState = self.states[ctx.invokingState] + rt = invokingState.transitions[0] + following = self.nextTokens(rt.followState) + expected.addSet(following) + expected.removeOne(Token.EPSILON) + ctx = ctx.parentCtx + if Token.EPSILON in following: + expected.addOne(Token.EOF) + return expected diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..e008fb2efac45a24b6d9566170bbd227cee252b6 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py @@ -0,0 +1,159 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + +# A tuple: (ATN state, predicted alt, syntactic, semantic context). +# The syntactic context is a graph-structured stack node whose +# path(s) to the root is the rule invocation(s) +# chain used to arrive at the state. The semantic context is +# the tree of semantic predicates encountered before reaching +# an ATN state. +#/ +from io import StringIO +from antlr4.PredictionContext import PredictionContext +from antlr4.atn.ATNState import ATNState, DecisionState +from antlr4.atn.LexerActionExecutor import LexerActionExecutor +from antlr4.atn.SemanticContext import SemanticContext + +# need a forward declaration +ATNConfig = None + +class ATNConfig(object): + __slots__ = ( + 'state', 'alt', 'context', 'semanticContext', 'reachesIntoOuterContext', + 'precedenceFilterSuppressed' + ) + + def __init__(self, state:ATNState=None, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=None, config:ATNConfig=None): + if config is not None: + if state is None: + state = config.state + if alt is None: + alt = config.alt + if context is None: + context = config.context + if semantic is None: + semantic = config.semanticContext + if semantic is None: + semantic = SemanticContext.NONE + # The ATN state associated with this configuration#/ + self.state = state + # What alt (or lexer rule) is predicted by this configuration#/ + self.alt = alt + # The stack of invoking states leading to the rule/states associated + # with this config. We track only those contexts pushed during + # execution of the ATN simulator. + self.context = context + self.semanticContext = semantic + # We cannot execute predicates dependent upon local context unless + # we know for sure we are in the correct context. 
Because there is + # no way to do this efficiently, we simply cannot evaluate + # dependent predicates unless we are in the rule that initially + # invokes the ATN simulator. + # + # closure() tracks the depth of how far we dip into the + # outer context: depth > 0. Note that it may not be totally + # accurate depth since I don't ever decrement. TODO: make it a boolean then + self.reachesIntoOuterContext = 0 if config is None else config.reachesIntoOuterContext + self.precedenceFilterSuppressed = False if config is None else config.precedenceFilterSuppressed + + # An ATN configuration is equal to another if both have + # the same state, they predict the same alternative, and + # syntactic/semantic contexts are the same. + #/ + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, ATNConfig): + return False + else: + return self.state.stateNumber==other.state.stateNumber \ + and self.alt==other.alt \ + and ((self.context is other.context) or (self.context==other.context)) \ + and self.semanticContext==other.semanticContext \ + and self.precedenceFilterSuppressed==other.precedenceFilterSuppressed + + def __hash__(self): + return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext)) + + def hashCodeForConfigSet(self): + return hash((self.state.stateNumber, self.alt, hash(self.semanticContext))) + + def equalsForConfigSet(self, other): + if self is other: + return True + elif not isinstance(other, ATNConfig): + return False + else: + return self.state.stateNumber==other.state.stateNumber \ + and self.alt==other.alt \ + and self.semanticContext==other.semanticContext + + def __str__(self): + with StringIO() as buf: + buf.write('(') + buf.write(str(self.state)) + buf.write(",") + buf.write(str(self.alt)) + if self.context is not None: + buf.write(",[") + buf.write(str(self.context)) + buf.write("]") + if self.semanticContext is not None and self.semanticContext is not SemanticContext.NONE: + buf.write(",") + buf.write(str(self.semanticContext)) + if self.reachesIntoOuterContext>0: + buf.write(",up=") + buf.write(str(self.reachesIntoOuterContext)) + buf.write(')') + return buf.getvalue() + +# need a forward declaration +LexerATNConfig = None + +class LexerATNConfig(ATNConfig): + __slots__ = ('lexerActionExecutor', 'passedThroughNonGreedyDecision') + + def __init__(self, state:ATNState, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=SemanticContext.NONE, + lexerActionExecutor:LexerActionExecutor=None, config:LexerATNConfig=None): + super().__init__(state=state, alt=alt, context=context, semantic=semantic, config=config) + if config is not None: + if lexerActionExecutor is None: + lexerActionExecutor = config.lexerActionExecutor + # This is the backing field for {@link #getLexerActionExecutor}. 
+ self.lexerActionExecutor = lexerActionExecutor + self.passedThroughNonGreedyDecision = False if config is None else self.checkNonGreedyDecision(config, state) + + def __hash__(self): + return hash((self.state.stateNumber, self.alt, self.context, + self.semanticContext, self.passedThroughNonGreedyDecision, + self.lexerActionExecutor)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerATNConfig): + return False + if self.passedThroughNonGreedyDecision != other.passedThroughNonGreedyDecision: + return False + if not(self.lexerActionExecutor == other.lexerActionExecutor): + return False + return super().__eq__(other) + + + + def hashCodeForConfigSet(self): + return hash(self) + + + + def equalsForConfigSet(self, other): + return self==other + + + + def checkNonGreedyDecision(self, source:LexerATNConfig, target:ATNState): + return source.passedThroughNonGreedyDecision \ + or isinstance(target, DecisionState) and target.nonGreedy diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9a512a682431e46a3f5b848420a762af3c7914 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py @@ -0,0 +1,212 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. + +# +# Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track +# info about the set, with support for combining similar configurations using a +# graph-structured stack. +#/ +from io import StringIO +from functools import reduce +from antlr4.PredictionContext import PredictionContext, merge +from antlr4.Utils import str_list +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import ATNConfig +from antlr4.atn.SemanticContext import SemanticContext +from antlr4.error.Errors import UnsupportedOperationException, IllegalStateException + +ATNSimulator = None + +class ATNConfigSet(object): + __slots__ = ( + 'configLookup', 'fullCtx', 'readonly', 'configs', 'uniqueAlt', + 'conflictingAlts', 'hasSemanticContext', 'dipsIntoOuterContext', + 'cachedHashCode' + ) + + # + # The reason that we need this is because we don't want the hash map to use + # the standard hash code and equals. We need all configurations with the same + # {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles + # the number of objects associated with ATNConfigs. The other solution is to + # use a hash table that lets us specify the equals/hashcode operation. + + def __init__(self, fullCtx:bool=True): + # All configs but hashed by (s, i, _, pi) not including context. Wiped out + # when we go readonly as this set becomes a DFA state. + self.configLookup = dict() + # Indicates that this configuration set is part of a full context + # LL prediction. It will be used to determine how to merge $. With SLL + # it's a wildcard whereas it is not for LL context merge. + self.fullCtx = fullCtx + # Indicates that the set of configurations is read-only. Do not + # allow any code to manipulate the set; DFA states will point at + # the sets and they must not change. This does not protect the other + # fields; in particular, conflictingAlts is set after + # we've made this readonly. 
+ self.readonly = False + # Track the elements as they are added to the set; supports get(i)#/ + self.configs = [] + + # TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation + # TODO: can we track conflicts as they are added to save scanning configs later? + self.uniqueAlt = 0 + self.conflictingAlts = None + + # Used in parser and lexer. In lexer, it indicates we hit a pred + # while computing a closure operation. Don't make a DFA state from this. + self.hasSemanticContext = False + self.dipsIntoOuterContext = False + + self.cachedHashCode = -1 + + def __iter__(self): + return self.configs.__iter__() + + # Adding a new config means merging contexts with existing configs for + # {@code (s, i, pi, _)}, where {@code s} is the + # {@link ATNConfig#state}, {@code i} is the {@link ATNConfig#alt}, and + # {@code pi} is the {@link ATNConfig#semanticContext}. We use + # {@code (s,i,pi)} as key. + # + #
This method updates {@link #dipsIntoOuterContext} and + # {@link #hasSemanticContext} when necessary.
+ #/ + def add(self, config:ATNConfig, mergeCache=None): + if self.readonly: + raise Exception("This set is readonly") + if config.semanticContext is not SemanticContext.NONE: + self.hasSemanticContext = True + if config.reachesIntoOuterContext > 0: + self.dipsIntoOuterContext = True + existing = self.getOrAdd(config) + if existing is config: + self.cachedHashCode = -1 + self.configs.append(config) # track order here + return True + # a previous (s,i,pi,_), merge with it and save result + rootIsWildcard = not self.fullCtx + merged = merge(existing.context, config.context, rootIsWildcard, mergeCache) + # no need to check for existing.context, config.context in cache + # since only way to create new graphs is "call rule" and here. + # We cache at both places. + existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext) + # make sure to preserve the precedence filter suppression during the merge + if config.precedenceFilterSuppressed: + existing.precedenceFilterSuppressed = True + existing.context = merged # replace context; no need to alt mapping + return True + + def getOrAdd(self, config:ATNConfig): + h = config.hashCodeForConfigSet() + l = self.configLookup.get(h, None) + if l is not None: + r = next((cfg for cfg in l if config.equalsForConfigSet(cfg)), None) + if r is not None: + return r + if l is None: + l = [config] + self.configLookup[h] = l + else: + l.append(config) + return config + + def getStates(self): + return set(c.state for c in self.configs) + + def getPredicates(self): + return list(cfg.semanticContext for cfg in self.configs if cfg.semanticContext!=SemanticContext.NONE) + + def get(self, i:int): + return self.configs[i] + + def optimizeConfigs(self, interpreter:ATNSimulator): + if self.readonly: + raise IllegalStateException("This set is readonly") + if len(self.configs)==0: + return + for config in self.configs: + config.context = interpreter.getCachedContext(config.context) + + def addAll(self, coll:list): + for c in coll: + self.add(c) + return False + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, ATNConfigSet): + return False + + same = self.configs is not None and \ + self.configs==other.configs and \ + self.fullCtx == other.fullCtx and \ + self.uniqueAlt == other.uniqueAlt and \ + self.conflictingAlts == other.conflictingAlts and \ + self.hasSemanticContext == other.hasSemanticContext and \ + self.dipsIntoOuterContext == other.dipsIntoOuterContext + + return same + + def __hash__(self): + if self.readonly: + if self.cachedHashCode == -1: + self.cachedHashCode = self.hashConfigs() + return self.cachedHashCode + return self.hashConfigs() + + def hashConfigs(self): + return reduce(lambda h, cfg: hash((h, cfg)), self.configs, 0) + + def __len__(self): + return len(self.configs) + + def isEmpty(self): + return len(self.configs)==0 + + def __contains__(self, config): + if self.configLookup is None: + raise UnsupportedOperationException("This method is not implemented for readonly sets.") + h = config.hashCodeForConfigSet() + l = self.configLookup.get(h, None) + if l is not None: + for c in l: + if config.equalsForConfigSet(c): + return True + return False + + def clear(self): + if self.readonly: + raise IllegalStateException("This set is readonly") + self.configs.clear() + self.cachedHashCode = -1 + self.configLookup.clear() + + def setReadonly(self, readonly:bool): + self.readonly = readonly + self.configLookup = None # can't mod, no need for lookup cache + + def __str__(self): + 
with StringIO() as buf: + buf.write(str_list(self.configs)) + if self.hasSemanticContext: + buf.write(",hasSemanticContext=") + buf.write(str(self.hasSemanticContext)) + if self.uniqueAlt!=ATN.INVALID_ALT_NUMBER: + buf.write(",uniqueAlt=") + buf.write(str(self.uniqueAlt)) + if self.conflictingAlts is not None: + buf.write(",conflictingAlts=") + buf.write(str(self.conflictingAlts)) + if self.dipsIntoOuterContext: + buf.write(",dipsIntoOuterContext") + return buf.getvalue() + + +class OrderedATNConfigSet(ATNConfigSet): + + def __init__(self): + super().__init__() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..69d5437f35437874a60bcc76118f018edbf65fed --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py @@ -0,0 +1,24 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. + +# need a forward declaration +ATNDeserializationOptions = None + +class ATNDeserializationOptions(object): + __slots__ = ('readonly', 'verifyATN', 'generateRuleBypassTransitions') + + defaultOptions = None + + def __init__(self, copyFrom:ATNDeserializationOptions = None): + self.readonly = False + self.verifyATN = True if copyFrom is None else copyFrom.verifyATN + self.generateRuleBypassTransitions = False if copyFrom is None else copyFrom.generateRuleBypassTransitions + + def __setattr__(self, key, value): + if key!="readonly" and self.readonly: + raise Exception("The object is read only.") + super(type(self), self).__setattr__(key,value) + +ATNDeserializationOptions.defaultOptions = ATNDeserializationOptions() +ATNDeserializationOptions.defaultOptions.readonly = True diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py new file mode 100644 index 0000000000000000000000000000000000000000..cc100d05a43e10b91390dd47a7b05f2eff6fe23f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py @@ -0,0 +1,529 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ +from uuid import UUID +from io import StringIO +from typing import Callable +from antlr4.Token import Token +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNType import ATNType +from antlr4.atn.ATNState import * +from antlr4.atn.Transition import * +from antlr4.atn.LexerAction import * +from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions + +# This is the earliest supported serialized UUID. +BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E") + +# This UUID indicates the serialized ATN contains two sets of +# IntervalSets, where the second set's values are encoded as +# 32-bit integers to support the full Unicode SMP range up to U+10FFFF. 
+ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089") + +# This list contains all of the currently supported UUIDs, ordered by when +# the feature first appeared in this branch. +SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ] + +SERIALIZED_VERSION = 3 + +# This is the current serialized UUID. +SERIALIZED_UUID = ADDED_UNICODE_SMP + +class ATNDeserializer (object): + __slots__ = ('deserializationOptions', 'data', 'pos', 'uuid') + + def __init__(self, options : ATNDeserializationOptions = None): + if options is None: + options = ATNDeserializationOptions.defaultOptions + self.deserializationOptions = options + + # Determines if a particular serialized representation of an ATN supports + # a particular feature, identified by the {@link UUID} used for serializing + # the ATN at the time the feature was first introduced. + # + # @param feature The {@link UUID} marking the first time the feature was + # supported in the serialized ATN. + # @param actualUuid The {@link UUID} of the actual serialized ATN which is + # currently being deserialized. + # @return {@code true} if the {@code actualUuid} value represents a + # serialized ATN at or after the feature identified by {@code feature} was + # introduced; otherwise, {@code false}. + + def isFeatureSupported(self, feature : UUID , actualUuid : UUID ): + idx1 = SUPPORTED_UUIDS.index(feature) + if idx1<0: + return False + idx2 = SUPPORTED_UUIDS.index(actualUuid) + return idx2 >= idx1 + + def deserialize(self, data : str): + self.reset(data) + self.checkVersion() + self.checkUUID() + atn = self.readATN() + self.readStates(atn) + self.readRules(atn) + self.readModes(atn) + sets = [] + # First, read all sets with 16-bit Unicode code points <= U+FFFF. + self.readSets(atn, sets, self.readInt) + # Next, if the ATN was serialized with the Unicode SMP feature, + # deserialize sets with 32-bit arguments <= U+10FFFF. 
+ if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid): + self.readSets(atn, sets, self.readInt32) + self.readEdges(atn, sets) + self.readDecisions(atn) + self.readLexerActions(atn) + self.markPrecedenceDecisions(atn) + self.verifyATN(atn) + if self.deserializationOptions.generateRuleBypassTransitions \ + and atn.grammarType == ATNType.PARSER: + self.generateRuleBypassTransitions(atn) + # re-verify after modification + self.verifyATN(atn) + return atn + + def reset(self, data:str): + def adjust(c): + v = ord(c) + return v-2 if v>1 else v + 65533 + temp = [ adjust(c) for c in data ] + # don't adjust the first value since that's the version number + temp[0] = ord(data[0]) + self.data = temp + self.pos = 0 + + def checkVersion(self): + version = self.readInt() + if version != SERIALIZED_VERSION: + raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").") + + def checkUUID(self): + uuid = self.readUUID() + if not uuid in SUPPORTED_UUIDS: + raise Exception("Could not deserialize ATN with UUID: " + str(uuid) + \ + " (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID) + self.uuid = uuid + + def readATN(self): + idx = self.readInt() + grammarType = ATNType.fromOrdinal(idx) + maxTokenType = self.readInt() + return ATN(grammarType, maxTokenType) + + def readStates(self, atn:ATN): + loopBackStateNumbers = [] + endStateNumbers = [] + nstates = self.readInt() + for i in range(0, nstates): + stype = self.readInt() + # ignore bad type of states + if stype==ATNState.INVALID_TYPE: + atn.addState(None) + continue + ruleIndex = self.readInt() + if ruleIndex == 0xFFFF: + ruleIndex = -1 + + s = self.stateFactory(stype, ruleIndex) + if stype == ATNState.LOOP_END: # special case + loopBackStateNumber = self.readInt() + loopBackStateNumbers.append((s, loopBackStateNumber)) + elif isinstance(s, BlockStartState): + endStateNumber = self.readInt() + endStateNumbers.append((s, endStateNumber)) + + atn.addState(s) + + # delay the assignment of loop back and end states until we know all the state instances have been initialized + for pair in loopBackStateNumbers: + pair[0].loopBackState = atn.states[pair[1]] + + for pair in endStateNumbers: + pair[0].endState = atn.states[pair[1]] + + numNonGreedyStates = self.readInt() + for i in range(0, numNonGreedyStates): + stateNumber = self.readInt() + atn.states[stateNumber].nonGreedy = True + + numPrecedenceStates = self.readInt() + for i in range(0, numPrecedenceStates): + stateNumber = self.readInt() + atn.states[stateNumber].isPrecedenceRule = True + + def readRules(self, atn:ATN): + nrules = self.readInt() + if atn.grammarType == ATNType.LEXER: + atn.ruleToTokenType = [0] * nrules + + atn.ruleToStartState = [0] * nrules + for i in range(0, nrules): + s = self.readInt() + startState = atn.states[s] + atn.ruleToStartState[i] = startState + if atn.grammarType == ATNType.LEXER: + tokenType = self.readInt() + if tokenType == 0xFFFF: + tokenType = Token.EOF + + atn.ruleToTokenType[i] = tokenType + + atn.ruleToStopState = [0] * nrules + for state in atn.states: + if not isinstance(state, RuleStopState): + continue + atn.ruleToStopState[state.ruleIndex] = state + atn.ruleToStartState[state.ruleIndex].stopState = state + + def readModes(self, atn:ATN): + nmodes = self.readInt() + for i in range(0, nmodes): + s = self.readInt() + atn.modeToStartState.append(atn.states[s]) + + def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]): + m = self.readInt() + for i in 
range(0, m): + iset = IntervalSet() + sets.append(iset) + n = self.readInt() + containsEof = self.readInt() + if containsEof!=0: + iset.addOne(-1) + for j in range(0, n): + i1 = readUnicode() + i2 = readUnicode() + iset.addRange(range(i1, i2 + 1)) # range upper limit is exclusive + + def readEdges(self, atn:ATN, sets:list): + nedges = self.readInt() + for i in range(0, nedges): + src = self.readInt() + trg = self.readInt() + ttype = self.readInt() + arg1 = self.readInt() + arg2 = self.readInt() + arg3 = self.readInt() + trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + srcState.addTransition(trans) + + # edges for rule stop states can be derived, so they aren't serialized + for state in atn.states: + for i in range(0, len(state.transitions)): + t = state.transitions[i] + if not isinstance(t, RuleTransition): + continue + outermostPrecedenceReturn = -1 + if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule: + if t.precedence == 0: + outermostPrecedenceReturn = t.target.ruleIndex + trans = EpsilonTransition(t.followState, outermostPrecedenceReturn) + atn.ruleToStopState[t.target.ruleIndex].addTransition(trans) + + for state in atn.states: + if isinstance(state, BlockStartState): + # we need to know the end state to set its start state + if state.endState is None: + raise Exception("IllegalState") + # block end states can only be associated to a single block start state + if state.endState.startState is not None: + raise Exception("IllegalState") + state.endState.startState = state + + if isinstance(state, PlusLoopbackState): + for i in range(0, len(state.transitions)): + target = state.transitions[i].target + if isinstance(target, PlusBlockStartState): + target.loopBackState = state + elif isinstance(state, StarLoopbackState): + for i in range(0, len(state.transitions)): + target = state.transitions[i].target + if isinstance(target, StarLoopEntryState): + target.loopBackState = state + + def readDecisions(self, atn:ATN): + ndecisions = self.readInt() + for i in range(0, ndecisions): + s = self.readInt() + decState = atn.states[s] + atn.decisionToState.append(decState) + decState.decision = i + + def readLexerActions(self, atn:ATN): + if atn.grammarType == ATNType.LEXER: + count = self.readInt() + atn.lexerActions = [ None ] * count + for i in range(0, count): + actionType = self.readInt() + data1 = self.readInt() + if data1 == 0xFFFF: + data1 = -1 + data2 = self.readInt() + if data2 == 0xFFFF: + data2 = -1 + lexerAction = self.lexerActionFactory(actionType, data1, data2) + atn.lexerActions[i] = lexerAction + + def generateRuleBypassTransitions(self, atn:ATN): + + count = len(atn.ruleToStartState) + atn.ruleToTokenType = [ 0 ] * count + for i in range(0, count): + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + + for i in range(0, count): + self.generateRuleBypassTransition(atn, i) + + def generateRuleBypassTransition(self, atn:ATN, idx:int): + + bypassStart = BasicBlockStartState() + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop = BlockEndState() + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + atn.defineDecisionState(bypassStart) + + bypassStop.startState = bypassStart + + excludeTransition = None + + if atn.ruleToStartState[idx].isPrecedenceRule: + # wrap from the beginning of the rule to the StarLoopEntryState + endState = None + for state in atn.states: + if self.stateIsEndStateFor(state, idx): + endState = state + excludeTransition = 
state.loopBackState.transitions[0] + break + + if excludeTransition is None: + raise Exception("Couldn't identify final state of the precedence rule prefix section.") + + else: + + endState = atn.ruleToStopState[idx] + + # all non-excluded transitions that currently target end state need to target blockEnd instead + for state in atn.states: + for transition in state.transitions: + if transition == excludeTransition: + continue + if transition.target == endState: + transition.target = bypassStop + + # all transitions leaving the rule start state need to leave blockStart instead + ruleToStartState = atn.ruleToStartState[idx] + count = len(ruleToStartState.transitions) + while count > 0: + bypassStart.addTransition(ruleToStartState.transitions[count-1]) + del ruleToStartState.transitions[-1] + + # link the new states + atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart)) + bypassStop.addTransition(EpsilonTransition(endState)) + + matchState = BasicState() + atn.addState(matchState) + matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx])) + bypassStart.addTransition(EpsilonTransition(matchState)) + + + def stateIsEndStateFor(self, state:ATNState, idx:int): + if state.ruleIndex != idx: + return None + if not isinstance(state, StarLoopEntryState): + return None + + maybeLoopEndState = state.transitions[len(state.transitions) - 1].target + if not isinstance(maybeLoopEndState, LoopEndState): + return None + + if maybeLoopEndState.epsilonOnlyTransitions and \ + isinstance(maybeLoopEndState.transitions[0].target, RuleStopState): + return state + else: + return None + + + # + # Analyze the {@link StarLoopEntryState} states in the specified ATN to set + # the {@link StarLoopEntryState#isPrecedenceDecision} field to the + # correct value. + # + # @param atn The ATN. + # + def markPrecedenceDecisions(self, atn:ATN): + for state in atn.states: + if not isinstance(state, StarLoopEntryState): + continue + + # We analyze the ATN to determine if this ATN decision state is the + # decision for the closure block that determines whether a + # precedence rule should continue or complete. 
+ # + if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule: + maybeLoopEndState = state.transitions[len(state.transitions) - 1].target + if isinstance(maybeLoopEndState, LoopEndState): + if maybeLoopEndState.epsilonOnlyTransitions and \ + isinstance(maybeLoopEndState.transitions[0].target, RuleStopState): + state.isPrecedenceDecision = True + + def verifyATN(self, atn:ATN): + if not self.deserializationOptions.verifyATN: + return + # verify assumptions + for state in atn.states: + if state is None: + continue + + self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1) + + if isinstance(state, PlusBlockStartState): + self.checkCondition(state.loopBackState is not None) + + if isinstance(state, StarLoopEntryState): + self.checkCondition(state.loopBackState is not None) + self.checkCondition(len(state.transitions) == 2) + + if isinstance(state.transitions[0].target, StarBlockStartState): + self.checkCondition(isinstance(state.transitions[1].target, LoopEndState)) + self.checkCondition(not state.nonGreedy) + elif isinstance(state.transitions[0].target, LoopEndState): + self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState)) + self.checkCondition(state.nonGreedy) + else: + raise Exception("IllegalState") + + if isinstance(state, StarLoopbackState): + self.checkCondition(len(state.transitions) == 1) + self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState)) + + if isinstance(state, LoopEndState): + self.checkCondition(state.loopBackState is not None) + + if isinstance(state, RuleStartState): + self.checkCondition(state.stopState is not None) + + if isinstance(state, BlockStartState): + self.checkCondition(state.endState is not None) + + if isinstance(state, BlockEndState): + self.checkCondition(state.startState is not None) + + if isinstance(state, DecisionState): + self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0) + else: + self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState)) + + def checkCondition(self, condition:bool, message=None): + if not condition: + if message is None: + message = "IllegalState" + raise Exception(message) + + def readInt(self): + i = self.data[self.pos] + self.pos += 1 + return i + + def readInt32(self): + low = self.readInt() + high = self.readInt() + return low | (high << 16) + + def readLong(self): + low = self.readInt32() + high = self.readInt32() + return (low & 0x00000000FFFFFFFF) | (high << 32) + + def readUUID(self): + low = self.readLong() + high = self.readLong() + allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64) + return UUID(int=allBits) + + edgeFactories = [ lambda args : None, + lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + RuleTransition(atn.states[arg1], arg2, arg3, target), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + PredicateTransition(target, arg1, arg2, arg3 != 0), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + ActionTransition(target, arg1, arg2, arg3 != 0), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + SetTransition(target, sets[arg1]), + lambda atn, src, trg, arg1, arg2, arg3, 
sets, target : \ + NotSetTransition(target, sets[arg1]), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + WildcardTransition(target), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + PrecedencePredicateTransition(target, arg1) + ] + + def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list): + target = atn.states[trg] + if type > len(self.edgeFactories) or self.edgeFactories[type] is None: + raise Exception("The specified transition type: " + str(type) + " is not valid.") + else: + return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target) + + stateFactories = [ lambda : None, + lambda : BasicState(), + lambda : RuleStartState(), + lambda : BasicBlockStartState(), + lambda : PlusBlockStartState(), + lambda : StarBlockStartState(), + lambda : TokensStartState(), + lambda : RuleStopState(), + lambda : BlockEndState(), + lambda : StarLoopbackState(), + lambda : StarLoopEntryState(), + lambda : PlusLoopbackState(), + lambda : LoopEndState() + ] + + def stateFactory(self, type:int, ruleIndex:int): + if type> len(self.stateFactories) or self.stateFactories[type] is None: + raise Exception("The specified state type " + str(type) + " is not valid.") + else: + s = self.stateFactories[type]() + if s is not None: + s.ruleIndex = ruleIndex + return s + + CHANNEL = 0 #The type of a {@link LexerChannelAction} action. + CUSTOM = 1 #The type of a {@link LexerCustomAction} action. + MODE = 2 #The type of a {@link LexerModeAction} action. + MORE = 3 #The type of a {@link LexerMoreAction} action. + POP_MODE = 4 #The type of a {@link LexerPopModeAction} action. + PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action. + SKIP = 6 #The type of a {@link LexerSkipAction} action. + TYPE = 7 #The type of a {@link LexerTypeAction} action. + + actionFactories = [ lambda data1, data2: LexerChannelAction(data1), + lambda data1, data2: LexerCustomAction(data1, data2), + lambda data1, data2: LexerModeAction(data1), + lambda data1, data2: LexerMoreAction.INSTANCE, + lambda data1, data2: LexerPopModeAction.INSTANCE, + lambda data1, data2: LexerPushModeAction(data1), + lambda data1, data2: LexerSkipAction.INSTANCE, + lambda data1, data2: LexerTypeAction(data1) + ] + + def lexerActionFactory(self, type:int, data1:int, data2:int): + + if type > len(self.actionFactories) or self.actionFactories[type] is None: + raise Exception("The specified lexer action type " + str(type) + " is not valid.") + else: + return self.actionFactories[type](data1, data2) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py new file mode 100644 index 0000000000000000000000000000000000000000..4f6f53f488fa2b731fb39ceecf350b0cb43d1c39 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py @@ -0,0 +1,47 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. 
+#/ +from antlr4.PredictionContext import PredictionContextCache, PredictionContext, getCachedPredictionContext +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.dfa.DFAState import DFAState + + +class ATNSimulator(object): + __slots__ = ('atn', 'sharedContextCache', '__dict__') + + # Must distinguish between missing edge and edge we know leads nowhere#/ + ERROR = DFAState(configs=ATNConfigSet()) + ERROR.stateNumber = 0x7FFFFFFF + + # The context cache maps all PredictionContext objects that are == + # to a single cached copy. This cache is shared across all contexts + # in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + # to use only cached nodes/graphs in addDFAState(). We don't want to + # fill this during closure() since there are lots of contexts that + # pop up but are not used ever again. It also greatly slows down closure(). + # + #
This cache makes a huge difference in memory and a little bit in speed. + # For the Java grammar on java.*, it dropped the memory requirements + # at the end from 25M to 16M. We don't store any of the full context + # graphs in the DFA because they are limited to local context only, + # but apparently there's a lot of repetition there as well. We optimize + # the config contexts before storing the config set in the DFA states + # by literally rebuilding them with cached subgraphs only. + # + # I tried a cache for use during closure operations, that was + # whacked after each adaptivePredict(). It cost a little bit + # more time I think and doesn't save on the overall footprint + # so it's not worth the complexity.
+ #/ + def __init__(self, atn:ATN, sharedContextCache:PredictionContextCache): + self.atn = atn + self.sharedContextCache = sharedContextCache + + def getCachedContext(self, context:PredictionContext): + if self.sharedContextCache is None: + return context + visited = dict() + return getCachedPredictionContext(context, self.sharedContextCache, visited) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf6a7b9442905c992bb1a88ce343a8a9d63f5b7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py @@ -0,0 +1,264 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# The following images show the relation of states and +# {@link ATNState#transitions} for various grammar constructs. +# +# +# +#
Basic Blocks +# +# Rule +# +# Block of 1 or more alternatives +# +# Greedy Loops +# +# Greedy Closure: {@code (...)*} +# +# Greedy Positive Closure: {@code (...)+} +# +# Greedy Optional: {@code (...)?} +# +# Non-Greedy Loops +# +# Non-Greedy Closure: {@code (...)*?} +# +# Non-Greedy Positive Closure: {@code (...)+?} +# +# Non-Greedy Optional: {@code (...)??}
+# +# +# + +from antlr4.atn.Transition import Transition + +INITIAL_NUM_TRANSITIONS = 4 + +class ATNState(object): + __slots__ = ( + 'atn', 'stateNumber', 'stateType', 'ruleIndex', 'epsilonOnlyTransitions', + 'transitions', 'nextTokenWithinRule', + ) + + # constants for serialization + INVALID_TYPE = 0 + BASIC = 1 + RULE_START = 2 + BLOCK_START = 3 + PLUS_BLOCK_START = 4 + STAR_BLOCK_START = 5 + TOKEN_START = 6 + RULE_STOP = 7 + BLOCK_END = 8 + STAR_LOOP_BACK = 9 + STAR_LOOP_ENTRY = 10 + PLUS_LOOP_BACK = 11 + LOOP_END = 12 + + serializationNames = [ + "INVALID", + "BASIC", + "RULE_START", + "BLOCK_START", + "PLUS_BLOCK_START", + "STAR_BLOCK_START", + "TOKEN_START", + "RULE_STOP", + "BLOCK_END", + "STAR_LOOP_BACK", + "STAR_LOOP_ENTRY", + "PLUS_LOOP_BACK", + "LOOP_END" ] + + INVALID_STATE_NUMBER = -1 + + def __init__(self): + # Which ATN are we in? + self.atn = None + self.stateNumber = ATNState.INVALID_STATE_NUMBER + self.stateType = None + self.ruleIndex = 0 # at runtime, we don't have Rule objects + self.epsilonOnlyTransitions = False + # Track the transitions emanating from this ATN state. + self.transitions = [] + # Used to cache lookahead during parsing, not used during construction + self.nextTokenWithinRule = None + + def __hash__(self): + return self.stateNumber + + def __eq__(self, other): + return isinstance(other, ATNState) and self.stateNumber==other.stateNumber + + def onlyHasEpsilonTransitions(self): + return self.epsilonOnlyTransitions + + def isNonGreedyExitState(self): + return False + + def __str__(self): + return str(self.stateNumber) + + def addTransition(self, trans:Transition, index:int=-1): + if len(self.transitions)==0: + self.epsilonOnlyTransitions = trans.isEpsilon + elif self.epsilonOnlyTransitions != trans.isEpsilon: + self.epsilonOnlyTransitions = False + # TODO System.err.format(Locale.getDefault(), "ATN state %d has both epsilon and non-epsilon transitions.\n", stateNumber); + if index==-1: + self.transitions.append(trans) + else: + self.transitions.insert(index, trans) + +class BasicState(ATNState): + + def __init__(self): + super().__init__() + self.stateType = self.BASIC + + +class DecisionState(ATNState): + __slots__ = ('decision', 'nonGreedy') + def __init__(self): + super().__init__() + self.decision = -1 + self.nonGreedy = False + +# The start of a regular {@code (...)} block. +class BlockStartState(DecisionState): + __slots__ = 'endState' + + def __init__(self): + super().__init__() + self.endState = None + +class BasicBlockStartState(BlockStartState): + + def __init__(self): + super().__init__() + self.stateType = self.BLOCK_START + +# Terminal node of a simple {@code (a|b|c)} block. +class BlockEndState(ATNState): + __slots__ = 'startState' + + def __init__(self): + super().__init__() + self.stateType = self.BLOCK_END + self.startState = None + +# The last node in the ATN for a rule, unless that rule is the start symbol. +# In that case, there is one transition to EOF. Later, we might encode +# references to all calls to this rule to compute FOLLOW sets for +# error handling. +# +class RuleStopState(ATNState): + + def __init__(self): + super().__init__() + self.stateType = self.RULE_STOP + +class RuleStartState(ATNState): + __slots__ = ('stopState', 'isPrecedenceRule') + + def __init__(self): + super().__init__() + self.stateType = self.RULE_START + self.stopState = None + self.isPrecedenceRule = False + +# Decision state for {@code A+} and {@code (A|B)+}. It has two transitions: +# one to the loop back to start of the block and one to exit. 
+# +class PlusLoopbackState(DecisionState): + + def __init__(self): + super().__init__() + self.stateType = self.PLUS_LOOP_BACK + +# Start of {@code (A|B|...)+} loop. Technically a decision state, but +# we don't use for code generation; somebody might need it, so I'm defining +# it for completeness. In reality, the {@link PlusLoopbackState} node is the +# real decision-making note for {@code A+}. +# +class PlusBlockStartState(BlockStartState): + __slots__ = 'loopBackState' + + def __init__(self): + super().__init__() + self.stateType = self.PLUS_BLOCK_START + self.loopBackState = None + +# The block that begins a closure loop. +class StarBlockStartState(BlockStartState): + + def __init__(self): + super().__init__() + self.stateType = self.STAR_BLOCK_START + +class StarLoopbackState(ATNState): + + def __init__(self): + super().__init__() + self.stateType = self.STAR_LOOP_BACK + + +class StarLoopEntryState(DecisionState): + __slots__ = ('loopBackState', 'isPrecedenceDecision') + + def __init__(self): + super().__init__() + self.stateType = self.STAR_LOOP_ENTRY + self.loopBackState = None + # Indicates whether this state can benefit from a precedence DFA during SLL decision making. + self.isPrecedenceDecision = None + +# Mark the end of a * or + loop. +class LoopEndState(ATNState): + __slots__ = 'loopBackState' + + def __init__(self): + super().__init__() + self.stateType = self.LOOP_END + self.loopBackState = None + +# The Tokens rule start state linking to each lexer rule start state */ +class TokensStartState(DecisionState): + + def __init__(self): + super().__init__() + self.stateType = self.TOKEN_START diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNType.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNType.py new file mode 100644 index 0000000000000000000000000000000000000000..cc0d4d925c6babee1caeee08f5e32fe4a1d6d0d4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNType.py @@ -0,0 +1,17 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + +from enum import IntEnum + +# Represents the type of recognizer an ATN applies to. + +class ATNType(IntEnum): + + LEXER = 0 + PARSER = 1 + + @classmethod + def fromOrdinal(cls, i:int): + return cls._value2member_map_[i] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerATNSimulator.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerATNSimulator.py new file mode 100644 index 0000000000000000000000000000000000000000..71201ff5f989820d340aac8672ab31db8579b55c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerATNSimulator.py @@ -0,0 +1,570 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + +# When we hit an accept state in either the DFA or the ATN, we +# have to notify the character stream to start buffering characters +# via {@link IntStream#mark} and record the current state. The current sim state +# includes the current index into the input, the current line, +# and current character position in that line. 
Note that the Lexer is +# tracking the starting line and character position of the token. These +# variables track the "state" of the simulator when it hits an accept state. +# +# We track these variables separately for the DFA and ATN simulation +# because the DFA simulation often has to fail over to the ATN +# simulation. If the ATN simulation fails, we need the DFA to fall +# back to its previously accepted state, if any. If the ATN succeeds, +# then the ATN does the accept and the DFA simulator that invoked it +# can simply return the predicted token type.
+#/ + +from antlr4.PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext +from antlr4.InputStream import InputStream +from antlr4.Token import Token +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import LexerATNConfig +from antlr4.atn.ATNSimulator import ATNSimulator +from antlr4.atn.ATNConfigSet import ATNConfigSet, OrderedATNConfigSet +from antlr4.atn.ATNState import RuleStopState, ATNState +from antlr4.atn.LexerActionExecutor import LexerActionExecutor +from antlr4.atn.Transition import Transition +from antlr4.dfa.DFAState import DFAState +from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException + +class SimState(object): + __slots__ = ('index', 'line', 'column', 'dfaState') + + def __init__(self): + self.reset() + + def reset(self): + self.index = -1 + self.line = 0 + self.column = -1 + self.dfaState = None + +# need forward declaration +Lexer = None +LexerATNSimulator = None + +class LexerATNSimulator(ATNSimulator): + __slots__ = ( + 'decisionToDFA', 'recog', 'startIndex', 'line', 'column', 'mode', + 'DEFAULT_MODE', 'MAX_CHAR_VALUE', 'prevAccept' + ) + + debug = False + dfa_debug = False + + MIN_DFA_EDGE = 0 + MAX_DFA_EDGE = 127 # forces unicode to stay in ATN + + ERROR = None + + def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache): + super().__init__(atn, sharedContextCache) + self.decisionToDFA = decisionToDFA + self.recog = recog + # The current token's starting index into the character stream. + # Shared across DFA to ATN simulation in case the ATN fails and the + # DFA did not have a previous accept state. In this case, we use the + # ATN-generated exception object. + self.startIndex = -1 + # line number 1..n within the input#/ + self.line = 1 + # The index of the character relative to the beginning of the line 0..n-1#/ + self.column = 0 + from antlr4.Lexer import Lexer + self.mode = Lexer.DEFAULT_MODE + # Cache Lexer properties to avoid further imports + self.DEFAULT_MODE = Lexer.DEFAULT_MODE + self.MAX_CHAR_VALUE = Lexer.MAX_CHAR_VALUE + # Used during DFA/ATN exec to record the most recent accept configuration info + self.prevAccept = SimState() + + + def copyState(self, simulator:LexerATNSimulator ): + self.column = simulator.column + self.line = simulator.line + self.mode = simulator.mode + self.startIndex = simulator.startIndex + + def match(self, input:InputStream , mode:int): + self.mode = mode + mark = input.mark() + try: + self.startIndex = input.index + self.prevAccept.reset() + dfa = self.decisionToDFA[mode] + if dfa.s0 is None: + return self.matchATN(input) + else: + return self.execATN(input, dfa.s0) + finally: + input.release(mark) + + def reset(self): + self.prevAccept.reset() + self.startIndex = -1 + self.line = 1 + self.column = 0 + self.mode = self.DEFAULT_MODE + + def matchATN(self, input:InputStream): + startState = self.atn.modeToStartState[self.mode] + + if LexerATNSimulator.debug: + print("matchATN mode " + str(self.mode) + " start: " + str(startState)) + + old_mode = self.mode + s0_closure = self.computeStartState(input, startState) + suppressEdge = s0_closure.hasSemanticContext + s0_closure.hasSemanticContext = False + + next = self.addDFAState(s0_closure) + if not suppressEdge: + self.decisionToDFA[self.mode].s0 = next + + predict = self.execATN(input, next) + + if LexerATNSimulator.debug: + print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString())) + + return predict + + def execATN(self, 
input:InputStream, ds0:DFAState): + if LexerATNSimulator.debug: + print("start state closure=" + str(ds0.configs)) + + if ds0.isAcceptState: + # allow zero-length tokens + self.captureSimState(self.prevAccept, input, ds0) + + t = input.LA(1) + s = ds0 # s is current/from DFA state + + while True: # while more work + if LexerATNSimulator.debug: + print("execATN loop starting closure:", str(s.configs)) + + # As we move src->trg, src->trg, we keep track of the previous trg to + # avoid looking up the DFA state again, which is expensive. + # If the previous target was already part of the DFA, we might + # be able to avoid doing a reach operation upon t. If s!=null, + # it means that semantic predicates didn't prevent us from + # creating a DFA state. Once we know s!=null, we check to see if + # the DFA state has an edge already for t. If so, we can just reuse + # it's configuration set; there's no point in re-computing it. + # This is kind of like doing DFA simulation within the ATN + # simulation because DFA simulation is really just a way to avoid + # computing reach/closure sets. Technically, once we know that + # we have a previously added DFA state, we could jump over to + # the DFA simulator. But, that would mean popping back and forth + # a lot and making things more complicated algorithmically. + # This optimization makes a lot of sense for loops within DFA. + # A character will take us back to an existing DFA state + # that already has lots of edges out of it. e.g., .* in comments. + # print("Target for:" + str(s) + " and:" + str(t)) + target = self.getExistingTargetState(s, t) + # print("Existing:" + str(target)) + if target is None: + target = self.computeTargetState(input, s, t) + # print("Computed:" + str(target)) + + if target == self.ERROR: + break + + # If this is a consumable input element, make sure to consume before + # capturing the accept state so the input index, line, and char + # position accurately reflect the state of the interpreter at the + # end of the token. + if t != Token.EOF: + self.consume(input) + + if target.isAcceptState: + self.captureSimState(self.prevAccept, input, target) + if t == Token.EOF: + break + + t = input.LA(1) + + s = target # flip; current DFA target becomes new src/from state + + return self.failOrAccept(self.prevAccept, input, s.configs, t) + + # Get an existing target state for an edge in the DFA. If the target state + # for the edge has not yet been computed or is otherwise not available, + # this method returns {@code null}. + # + # @param s The current DFA state + # @param t The next input symbol + # @return The existing target DFA state for the given input symbol + # {@code t}, or {@code null} if the target state for this edge is not + # already cached + def getExistingTargetState(self, s:DFAState, t:int): + if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE: + return None + + target = s.edges[t - self.MIN_DFA_EDGE] + if LexerATNSimulator.debug and target is not None: + print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber)) + + return target + + # Compute a target state for an edge in the DFA, and attempt to add the + # computed state and corresponding edge to the DFA. + # + # @param input The input stream + # @param s The current DFA state + # @param t The next input symbol + # + # @return The computed target DFA state for the given input symbol + # {@code t}. If {@code t} does not lead to a valid DFA state, this method + # returns {@link #ERROR}. 
+ def computeTargetState(self, input:InputStream, s:DFAState, t:int): + reach = OrderedATNConfigSet() + + # if we don't find an existing DFA state + # Fill reach starting from closure, following t transitions + self.getReachableConfigSet(input, s.configs, reach, t) + + if len(reach)==0: # we got nowhere on t from s + if not reach.hasSemanticContext: + # we got nowhere on t, don't throw out this knowledge; it'd + # cause a failover from DFA later. + self. addDFAEdge(s, t, self.ERROR) + + # stop when we can't match any more char + return self.ERROR + + # Add an edge from s to target DFA found/created for reach + return self.addDFAEdge(s, t, cfgs=reach) + + def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int): + if self.prevAccept.dfaState is not None: + lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor + self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + else: + # if no accept and EOF is first char, return EOF + if t==Token.EOF and input.index==self.startIndex: + return Token.EOF + raise LexerNoViableAltException(self.recog, input, self.startIndex, reach) + + # Given a starting configuration set, figure out all ATN configurations + # we can reach upon input {@code t}. Parameter {@code reach} is a return + # parameter. + def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int): + # this is used to skip processing for configs which have a lower priority + # than a config that already reached an accept state for the same rule + skipAlt = ATN.INVALID_ALT_NUMBER + for cfg in closure: + currentAltReachedAcceptState = ( cfg.alt == skipAlt ) + if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision: + continue + + if LexerATNSimulator.debug: + print("testing", self.getTokenName(t), "at", str(cfg)) + + for trans in cfg.state.transitions: # for each transition + target = self.getReachableTarget(trans, t) + if target is not None: + lexerActionExecutor = cfg.lexerActionExecutor + if lexerActionExecutor is not None: + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex) + + treatEofAsEpsilon = (t == Token.EOF) + config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg) + if self.closure(input, config, reach, currentAltReachedAcceptState, True, treatEofAsEpsilon): + # any remaining configs for this alt have a lower priority than + # the one that just reached an accept state. 
+ skipAlt = cfg.alt + + def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int): + if LexerATNSimulator.debug: + print("ACTION", lexerActionExecutor) + + # seek to after last char in token + input.seek(index) + self.line = line + self.column = charPos + + if lexerActionExecutor is not None and self.recog is not None: + lexerActionExecutor.execute(self.recog, input, startIndex) + + def getReachableTarget(self, trans:Transition, t:int): + if trans.matches(t, 0, self.MAX_CHAR_VALUE): + return trans.target + else: + return None + + def computeStartState(self, input:InputStream, p:ATNState): + initialContext = PredictionContext.EMPTY + configs = OrderedATNConfigSet() + for i in range(0,len(p.transitions)): + target = p.transitions[i].target + c = LexerATNConfig(state=target, alt=i+1, context=initialContext) + self.closure(input, c, configs, False, False, False) + return configs + + # Since the alternatives within any lexer decision are ordered by + # preference, this method stops pursuing the closure as soon as an accept + # state is reached. After the first accept state is reached by depth-first + # search from {@code config}, all other (potentially reachable) states for + # this rule would have a lower priority. + # + # @return {@code true} if an accept state is reached, otherwise + # {@code false}. + def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool, + speculative:bool, treatEofAsEpsilon:bool): + if LexerATNSimulator.debug: + print("closure(" + str(config) + ")") + + if isinstance( config.state, RuleStopState ): + if LexerATNSimulator.debug: + if self.recog is not None: + print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config)) + else: + print("closure at rule stop", str(config)) + + if config.context is None or config.context.hasEmptyPath(): + if config.context is None or config.context.isEmpty(): + configs.add(config) + return True + else: + configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY)) + currentAltReachedAcceptState = True + + if config.context is not None and not config.context.isEmpty(): + for i in range(0,len(config.context)): + if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE: + newContext = config.context.getParent(i) # "pop" return state + returnState = self.atn.states[config.context.getReturnState(i)] + c = LexerATNConfig(state=returnState, config=config, context=newContext) + currentAltReachedAcceptState = self.closure(input, c, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon) + + return currentAltReachedAcceptState + + # optimization + if not config.state.epsilonOnlyTransitions: + if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision: + configs.add(config) + + for t in config.state.transitions: + c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon) + if c is not None: + currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon) + + return currentAltReachedAcceptState + + # side-effect: can alter configs.hasSemanticContext + def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, configs:ATNConfigSet, + speculative:bool, treatEofAsEpsilon:bool): + c = None + if t.serializationType==Transition.RULE: + newContext = 
SingletonPredictionContext.create(config.context, t.followState.stateNumber) + c = LexerATNConfig(state=t.target, config=config, context=newContext) + + elif t.serializationType==Transition.PRECEDENCE: + raise UnsupportedOperationException("Precedence predicates are not supported in lexers.") + + elif t.serializationType==Transition.PREDICATE: + # Track traversing semantic predicates. If we traverse, + # we cannot add a DFA state for this "reach" computation + # because the DFA would not test the predicate again in the + # future. Rather than creating collections of semantic predicates + # like v3 and testing them on prediction, v4 will test them on the + # fly all the time using the ATN not the DFA. This is slower but + # semantically it's not used that often. One of the key elements to + # this predicate mechanism is not adding DFA states that see + # predicates immediately afterwards in the ATN. For example, + + # a : ID {p1}? | ID {p2}? ; + + # should create the start state for rule 'a' (to save start state + # competition), but should not create target of ID state. The + # collection of ATN states the following ID references includes + # states reached by traversing predicates. Since this is when we + # test them, we cannot cash the DFA state target of ID. + + if LexerATNSimulator.debug: + print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex)) + configs.hasSemanticContext = True + if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative): + c = LexerATNConfig(state=t.target, config=config) + + elif t.serializationType==Transition.ACTION: + if config.context is None or config.context.hasEmptyPath(): + # execute actions anywhere in the start rule for a token. + # + # TODO: if the entry rule is invoked recursively, some + # actions may be executed during the recursive call. The + # problem can appear when hasEmptyPath() is true but + # isEmpty() is false. In this case, the config needs to be + # split into two contexts - one with just the empty path + # and another with everything but the empty path. + # Unfortunately, the current algorithm does not allow + # getEpsilonTarget to return two configurations, so + # additional modifications are needed before we can support + # the split operation. + lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor, + self.atn.lexerActions[t.actionIndex]) + c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor) + + else: + # ignore actions in referenced rules + c = LexerATNConfig(state=t.target, config=config) + + elif t.serializationType==Transition.EPSILON: + c = LexerATNConfig(state=t.target, config=config) + + elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]: + if treatEofAsEpsilon: + if t.matches(Token.EOF, 0, self.MAX_CHAR_VALUE): + c = LexerATNConfig(state=t.target, config=config) + + return c + + # Evaluate a predicate specified in the lexer. + # + #

If {@code speculative} is {@code true}, this method was called before + # {@link #consume} for the matched character. This method should call + # {@link #consume} before evaluating the predicate to ensure position-sensitive + # values, including {@link Lexer#getText}, {@link Lexer#getLine}, + # and {@link Lexer#getColumn}, properly reflect the current + # lexer state. This method should restore {@code input} and the simulator + # to the original state before returning (i.e. undo the actions made by the + # call to {@link #consume}).

+ # + # @param input The input stream. + # @param ruleIndex The rule containing the predicate. + # @param predIndex The index of the predicate within the rule. + # @param speculative {@code true} if the current index in {@code input} is + # one character before the predicate's location. + # + # @return {@code true} if the specified predicate evaluates to + # {@code true}. + #/ + def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool): + # assume true if no recognizer was provided + if self.recog is None: + return True + + if not speculative: + return self.recog.sempred(None, ruleIndex, predIndex) + + savedcolumn = self.column + savedLine = self.line + index = input.index + marker = input.mark() + try: + self.consume(input) + return self.recog.sempred(None, ruleIndex, predIndex) + finally: + self.column = savedcolumn + self.line = savedLine + input.seek(index) + input.release(marker) + + def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState): + settings.index = input.index + settings.line = self.line + settings.column = self.column + settings.dfaState = dfaState + + def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState: + + if to is None and cfgs is not None: + # leading to this call, ATNConfigSet.hasSemanticContext is used as a + # marker indicating dynamic predicate evaluation makes this edge + # dependent on the specific input sequence, so the static edge in the + # DFA should be omitted. The target DFAState is still created since + # execATN has the ability to resynchronize with the DFA state cache + # following the predicate evaluation step. + # + # TJP notes: next time through the DFA, we see a pred again and eval. + # If that gets us to a previously created (but dangling) DFA + # state, we can continue in pure DFA mode from there. + #/ + suppressEdge = cfgs.hasSemanticContext + cfgs.hasSemanticContext = False + + to = self.addDFAState(cfgs) + + if suppressEdge: + return to + + # add the edge + if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE: + # Only track edges within the DFA bounds + return to + + if LexerATNSimulator.debug: + print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk)) + + if from_.edges is None: + # make room for tokens 1..n and -1 masquerading as index 0 + from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1) + + from_.edges[tk - self.MIN_DFA_EDGE] = to # connect + + return to + + + # Add a new DFA state if there isn't one with this set of + # configurations already. This method also detects the first + # configuration containing an ATN rule stop state. Later, when + # traversing the DFA, we will know which rule to accept. 
+ def addDFAState(self, configs:ATNConfigSet) -> DFAState: + + proposed = DFAState(configs=configs) + firstConfigWithRuleStopState = next((cfg for cfg in configs if isinstance(cfg.state, RuleStopState)), None) + + if firstConfigWithRuleStopState is not None: + proposed.isAcceptState = True + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor + proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex] + + dfa = self.decisionToDFA[self.mode] + existing = dfa.states.get(proposed, None) + if existing is not None: + return existing + + newState = proposed + + newState.stateNumber = len(dfa.states) + configs.setReadonly(True) + newState.configs = configs + dfa.states[newState] = newState + return newState + + def getDFA(self, mode:int): + return self.decisionToDFA[mode] + + # Get the text matched so far for the current token. + def getText(self, input:InputStream): + # index is first lookahead char, don't include. + return input.getText(self.startIndex, input.index-1) + + def consume(self, input:InputStream): + curChar = input.LA(1) + if curChar==ord('\n'): + self.line += 1 + self.column = 0 + else: + self.column += 1 + input.consume() + + def getTokenName(self, t:int): + if t==-1: + return "EOF" + else: + return "'" + chr(t) + "'" + + +LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet()) + +del Lexer diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa7a895f31cea4415b71def30ca1ec90167f1b3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py @@ -0,0 +1,298 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. + # + +from enum import IntEnum + +# need forward declaration +Lexer = None + + +class LexerActionType(IntEnum): + + CHANNEL = 0 #The type of a {@link LexerChannelAction} action. + CUSTOM = 1 #The type of a {@link LexerCustomAction} action. + MODE = 2 #The type of a {@link LexerModeAction} action. + MORE = 3 #The type of a {@link LexerMoreAction} action. + POP_MODE = 4 #The type of a {@link LexerPopModeAction} action. + PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action. + SKIP = 6 #The type of a {@link LexerSkipAction} action. + TYPE = 7 #The type of a {@link LexerTypeAction} action. + +class LexerAction(object): + __slots__ = ('actionType', 'isPositionDependent') + + def __init__(self, action:LexerActionType): + self.actionType = action + self.isPositionDependent = False + + def __hash__(self): + return hash(self.actionType) + + def __eq__(self, other): + return self is other + + +# +# Implements the {@code skip} lexer action by calling {@link Lexer#skip}. +# +#

The {@code skip} command does not have any parameters, so this action is +# implemented as a singleton instance exposed by {@link #INSTANCE}.
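+#
+# For orientation (illustrative grammar snippets, not from this codebase),
+# these are the standard lexer commands that map onto the action classes in
+# this file:
+#
+#   WS      : [ \t\r\n]+    -> skip ;              // LexerSkipAction
+#   COMMENT : '/*' .*? '*/' -> channel(HIDDEN) ;   // LexerChannelAction
+#   OPEN    : '<'           -> pushMode(INSIDE) ;  // LexerPushModeAction
+#   CLOSE   : '>'           -> popMode ;           // LexerPopModeAction
+#   HALF    : '/*'          -> more, mode(CMT) ;   // LexerMoreAction, LexerModeAction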

+class LexerSkipAction(LexerAction): + + # Provides a singleton instance of this parameterless lexer action. + INSTANCE = None + + def __init__(self): + super().__init__(LexerActionType.SKIP) + + def execute(self, lexer:Lexer): + lexer.skip() + + def __str__(self): + return "skip" + +LexerSkipAction.INSTANCE = LexerSkipAction() + +# Implements the {@code type} lexer action by calling {@link Lexer#setType} +# with the assigned type. +class LexerTypeAction(LexerAction): + __slots__ = 'type' + + def __init__(self, type:int): + super().__init__(LexerActionType.TYPE) + self.type = type + + def execute(self, lexer:Lexer): + lexer.type = self.type + + def __hash__(self): + return hash((self.actionType, self.type)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerTypeAction): + return False + else: + return self.type == other.type + + def __str__(self): + return "type(" + str(self.type) + ")" + + +# Implements the {@code pushMode} lexer action by calling +# {@link Lexer#pushMode} with the assigned mode. +class LexerPushModeAction(LexerAction): + __slots__ = 'mode' + + def __init__(self, mode:int): + super().__init__(LexerActionType.PUSH_MODE) + self.mode = mode + + #

This action is implemented by calling {@link Lexer#pushMode} with the + # value provided by {@link #getMode}.

+ def execute(self, lexer:Lexer): + lexer.pushMode(self.mode) + + def __hash__(self): + return hash((self.actionType, self.mode)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerPushModeAction): + return False + else: + return self.mode == other.mode + + def __str__(self): + return "pushMode(" + str(self.mode) + ")" + + +# Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}. +# +#

The {@code popMode} command does not have any parameters, so this action is +# implemented as a singleton instance exposed by {@link #INSTANCE}.

+class LexerPopModeAction(LexerAction): + + INSTANCE = None + + def __init__(self): + super().__init__(LexerActionType.POP_MODE) + + #

This action is implemented by calling {@link Lexer#popMode}.

+ def execute(self, lexer:Lexer): + lexer.popMode() + + def __str__(self): + return "popMode" + +LexerPopModeAction.INSTANCE = LexerPopModeAction() + +# Implements the {@code more} lexer action by calling {@link Lexer#more}. +# +#

The {@code more} command does not have any parameters, so this action is +# implemented as a singleton instance exposed by {@link #INSTANCE}.

+class LexerMoreAction(LexerAction): + + INSTANCE = None + + def __init__(self): + super().__init__(LexerActionType.MORE) + + #

This action is implemented by calling {@link Lexer#more}.

+ def execute(self, lexer:Lexer): + lexer.more() + + def __str__(self): + return "more" + +LexerMoreAction.INSTANCE = LexerMoreAction() + +# Implements the {@code mode} lexer action by calling {@link Lexer#mode} with +# the assigned mode. +class LexerModeAction(LexerAction): + __slots__ = 'mode' + + def __init__(self, mode:int): + super().__init__(LexerActionType.MODE) + self.mode = mode + + #

This action is implemented by calling {@link Lexer#mode} with the + # value provided by {@link #getMode}.

+ def execute(self, lexer:Lexer): + lexer.mode(self.mode) + + def __hash__(self): + return hash((self.actionType, self.mode)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerModeAction): + return False + else: + return self.mode == other.mode + + def __str__(self): + return "mode(" + str(self.mode) + ")" + +# Executes a custom lexer action by calling {@link Recognizer#action} with the +# rule and action indexes assigned to the custom action. The implementation of +# a custom action is added to the generated code for the lexer in an override +# of {@link Recognizer#action} when the grammar is compiled. +# +#

This class may represent embedded actions created with the {...} +# syntax in ANTLR 4, as well as actions created for lexer commands where the +# command argument could not be evaluated when the grammar was compiled.
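+#
+# Illustrative example (hypothetical grammar): an embedded action such as
+#
+#   ID : [a-z]+ { self.onId() } ;
+#
+# compiles to a LexerCustomAction(ruleIndex, actionIndex) that the generated
+# lexer dispatches through its action() override.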

+ +class LexerCustomAction(LexerAction): + __slots__ = ('ruleIndex', 'actionIndex') + + # Constructs a custom lexer action with the specified rule and action + # indexes. + # + # @param ruleIndex The rule index to use for calls to + # {@link Recognizer#action}. + # @param actionIndex The action index to use for calls to + # {@link Recognizer#action}. + #/ + def __init__(self, ruleIndex:int, actionIndex:int): + super().__init__(LexerActionType.CUSTOM) + self.ruleIndex = ruleIndex + self.actionIndex = actionIndex + self.isPositionDependent = True + + #

Custom actions are implemented by calling {@link Lexer#action} with the + # appropriate rule and action indexes.

+ def execute(self, lexer:Lexer): + lexer.action(None, self.ruleIndex, self.actionIndex) + + def __hash__(self): + return hash((self.actionType, self.ruleIndex, self.actionIndex)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerCustomAction): + return False + else: + return self.ruleIndex == other.ruleIndex and self.actionIndex == other.actionIndex + +# Implements the {@code channel} lexer action by calling +# {@link Lexer#setChannel} with the assigned channel. +class LexerChannelAction(LexerAction): + __slots__ = 'channel' + + # Constructs a new {@code channel} action with the specified channel value. + # @param channel The channel value to pass to {@link Lexer#setChannel}. + def __init__(self, channel:int): + super().__init__(LexerActionType.CHANNEL) + self.channel = channel + + #

This action is implemented by calling {@link Lexer#setChannel} with the + # value provided by {@link #getChannel}.

+ def execute(self, lexer:Lexer): + lexer._channel = self.channel + + def __hash__(self): + return hash((self.actionType, self.channel)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerChannelAction): + return False + else: + return self.channel == other.channel + + def __str__(self): + return "channel(" + str(self.channel) + ")" + +# This implementation of {@link LexerAction} is used for tracking input offsets +# for position-dependent actions within a {@link LexerActionExecutor}. +# +#

This action is not serialized as part of the ATN, and is only required for +# position-dependent lexer actions which appear at a location other than the +# end of a rule. For more information about DFA optimizations employed for +# lexer actions, see {@link LexerActionExecutor#append} and +# {@link LexerActionExecutor#fixOffsetBeforeMatch}.
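+#
+# Illustrative example (hypothetical grammar): in
+#
+#   STR : '"' { self.onQuote() } .*? '"' ;
+#
+# the embedded action sits before the end of the rule, so it is
+# position-dependent and gets wrapped as
+# LexerIndexedCustomAction(offset, action), with the offset measured from
+# the token start index.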

+class LexerIndexedCustomAction(LexerAction): + __slots__ = ('offset', 'action') + + # Constructs a new indexed custom action by associating a character offset + # with a {@link LexerAction}. + # + #

Note: This class is only required for lexer actions for which + # {@link LexerAction#isPositionDependent} returns {@code true}.

+ # + # @param offset The offset into the input {@link CharStream}, relative to + # the token start index, at which the specified lexer action should be + # executed. + # @param action The lexer action to execute at a particular offset in the + # input {@link CharStream}. + def __init__(self, offset:int, action:LexerAction): + super().__init__(action.actionType) + self.offset = offset + self.action = action + self.isPositionDependent = True + + #

This method calls {@link #execute} on the result of {@link #getAction} + # using the provided {@code lexer}.

+ def execute(self, lexer:Lexer): + # assume the input stream position was properly set by the calling code + self.action.execute(lexer) + + def __hash__(self): + return hash((self.actionType, self.offset, self.action)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerIndexedCustomAction): + return False + else: + return self.offset == other.offset and self.action == other.action diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py new file mode 100644 index 0000000000000000000000000000000000000000..5c6462c3a28f4ccbcb0a65dcde96f497dd392416 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py @@ -0,0 +1,143 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + +# Represents an executor for a sequence of lexer actions which traversed during +# the matching operation of a lexer rule (token). +# +#

The executor tracks position information for position-dependent lexer actions +# efficiently, ensuring that actions appearing only at the end of the rule do +# not cause bloating of the {@link DFA} created for the lexer.
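+#
+# Illustrative example of how an executor accumulates actions while a rule
+# is matched (actA/actB are hypothetical LexerAction objects; append() and
+# fixOffsetBeforeMatch() are defined below):
+#
+#   ex = LexerActionExecutor.append(None, actA)  # executor for [actA]
+#   ex = LexerActionExecutor.append(ex, actB)    # executor for [actA, actB]
+#   ex = ex.fixOffsetBeforeMatch(3)              # pin any position-dependent
+#                                                # actions at offset 3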

+ + +from antlr4.InputStream import InputStream +from antlr4.atn.LexerAction import LexerAction, LexerIndexedCustomAction + +# need a forward declaration +Lexer = None +LexerActionExecutor = None + +class LexerActionExecutor(object): + __slots__ = ('lexerActions', 'hashCode') + + def __init__(self, lexerActions:list=list()): + self.lexerActions = lexerActions + # Caches the result of {@link #hashCode} since the hash code is an element + # of the performance-critical {@link LexerATNConfig#hashCode} operation. + self.hashCode = hash("".join([str(la) for la in lexerActions])) + + + # Creates a {@link LexerActionExecutor} which executes the actions for + # the input {@code lexerActionExecutor} followed by a specified + # {@code lexerAction}. + # + # @param lexerActionExecutor The executor for actions already traversed by + # the lexer while matching a token within a particular + # {@link LexerATNConfig}. If this is {@code null}, the method behaves as + # though it were an empty executor. + # @param lexerAction The lexer action to execute after the actions + # specified in {@code lexerActionExecutor}. + # + # @return A {@link LexerActionExecutor} for executing the combine actions + # of {@code lexerActionExecutor} and {@code lexerAction}. + @staticmethod + def append(lexerActionExecutor:LexerActionExecutor , lexerAction:LexerAction ): + if lexerActionExecutor is None: + return LexerActionExecutor([ lexerAction ]) + + lexerActions = lexerActionExecutor.lexerActions + [ lexerAction ] + return LexerActionExecutor(lexerActions) + + # Creates a {@link LexerActionExecutor} which encodes the current offset + # for position-dependent lexer actions. + # + #

Normally, when the executor encounters lexer actions where + # {@link LexerAction#isPositionDependent} returns {@code true}, it calls + # {@link IntStream#seek} on the input {@link CharStream} to set the input + # position to the end of the current token. This behavior provides + # for efficient DFA representation of lexer actions which appear at the end + # of a lexer rule, even when the lexer rule matches a variable number of + # characters.

+ # + #

Prior to traversing a match transition in the ATN, the current offset + # from the token start index is assigned to all position-dependent lexer + # actions which have not already been assigned a fixed offset. By storing + # the offsets relative to the token start index, the DFA representation of + # lexer actions which appear in the middle of tokens remains efficient due + # to sharing among tokens of the same length, regardless of their absolute + # position in the input stream.

+ # + #

If the current executor already has offsets assigned to all + # position-dependent lexer actions, the method returns {@code this}.

+ # + # @param offset The current offset to assign to all position-dependent + # lexer actions which do not already have offsets assigned. + # + # @return A {@link LexerActionExecutor} which stores input stream offsets + # for all position-dependent lexer actions. + #/ + def fixOffsetBeforeMatch(self, offset:int): + updatedLexerActions = None + for i in range(0, len(self.lexerActions)): + if self.lexerActions[i].isPositionDependent and not isinstance(self.lexerActions[i], LexerIndexedCustomAction): + if updatedLexerActions is None: + updatedLexerActions = [ la for la in self.lexerActions ] + updatedLexerActions[i] = LexerIndexedCustomAction(offset, self.lexerActions[i]) + + if updatedLexerActions is None: + return self + else: + return LexerActionExecutor(updatedLexerActions) + + + # Execute the actions encapsulated by this executor within the context of a + # particular {@link Lexer}. + # + #

This method calls {@link IntStream#seek} to set the position of the + # {@code input} {@link CharStream} prior to calling + # {@link LexerAction#execute} on a position-dependent action. Before the + # method returns, the input position will be restored to the same position + # it was in when the method was invoked.

+ # + # @param lexer The lexer instance. + # @param input The input stream which is the source for the current token. + # When this method is called, the current {@link IntStream#index} for + # {@code input} should be the start of the following token, i.e. 1 + # character past the end of the current token. + # @param startIndex The token start index. This value may be passed to + # {@link IntStream#seek} to set the {@code input} position to the beginning + # of the token. + #/ + def execute(self, lexer:Lexer, input:InputStream, startIndex:int): + requiresSeek = False + stopIndex = input.index + try: + for lexerAction in self.lexerActions: + if isinstance(lexerAction, LexerIndexedCustomAction): + offset = lexerAction.offset + input.seek(startIndex + offset) + lexerAction = lexerAction.action + requiresSeek = (startIndex + offset) != stopIndex + elif lexerAction.isPositionDependent: + input.seek(stopIndex) + requiresSeek = False + lexerAction.execute(lexer) + finally: + if requiresSeek: + input.seek(stopIndex) + + def __hash__(self): + return self.hashCode + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerActionExecutor): + return False + else: + return self.hashCode == other.hashCode \ + and self.lexerActions == other.lexerActions + +del Lexer diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py new file mode 100644 index 0000000000000000000000000000000000000000..d1fb3d7ed8bab47fb851d85da88d20dc39715627 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py @@ -0,0 +1,1649 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# +# The embodiment of the adaptive LL(*), ALL(*), parsing strategy. +# +#

+# The basic complexity of the adaptive strategy makes it harder to understand. +# We begin with ATN simulation to build paths in a DFA. Subsequent prediction +# requests go through the DFA first. If they reach a state without an edge for +# the current symbol, the algorithm fails over to the ATN simulation to +# complete the DFA path for the current input (until it finds a conflict state +# or uniquely predicting state).
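+#
+# A rough sketch of that fail-over loop (illustrative pseudo-code only; the
+# real logic lives in execATN() below):
+#
+#   s = dfa.s0
+#   while True:
+#       target = getExistingTargetState(s, t)       # cheap: follow a DFA edge
+#       if target is None:
+#           target = computeTargetState(dfa, s, t)  # fail over to ATN simulation
+#       if target is ERROR or target.isAcceptState:
+#           break
+#       consume t; s = target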

+# +#

+# All of that is done without using the outer context because we want to create +# a DFA that is not dependent upon the rule invocation stack when we do a +# prediction. One DFA works in all contexts. We avoid using context not +# necessarily because it's slower, although it can be, but because of the DFA +# caching problem. The closure routine only considers the rule invocation stack +# created during prediction beginning in the decision rule. For example, if +# prediction occurs without invoking another rule's ATN, there are no context +# stacks in the configurations. When lack of context leads to a conflict, we +# don't know if it's an ambiguity or a weakness in the strong LL(*) parsing +# strategy (versus full LL(*)).

+# +#

+# When SLL yields a configuration set with conflict, we rewind the input and +# retry the ATN simulation, this time using full outer context without adding +# to the DFA. Configuration context stacks will be the full invocation stacks +# from the start rule. If we get a conflict using full context, then we can +# definitively say we have a true ambiguity for that input sequence. If we +# don't get a conflict, it implies that the decision is sensitive to the outer +# context. (It is not context-sensitive in the sense of context-sensitive +# grammars.)

+# +#

+# The next time we reach this DFA state with an SLL conflict, through DFA +# simulation, we will again retry the ATN simulation using full context mode. +# This is slow because we can't save the results and have to "interpret" the +# ATN each time we get that input.

+# +#

+# CACHING FULL CONTEXT PREDICTIONS

+# +#

+# We could easily cache results from full context to predicted alternative, and +# that saves a lot of time, but it doesn't work in the presence of predicates. The set +# of visible predicates from the ATN start state changes depending on the +# context, because closure can fall off the end of a rule. I tried to cache +# tuples (stack context, semantic context, predicted alt) but it was slower +# than interpreting and much more complicated. It also required a huge amount of +# memory. The goal is not to create the world's fastest parser anyway. I'd like +# to keep this algorithm simple. By launching multiple threads, we can improve +# the speed of parsing across a large number of files.

+# +#

+# There is no strict ordering between the amount of input used by SLL vs LL, +# which makes it really hard to build a cache for full context. Let's say that +# we have input A B C that leads to an SLL conflict with full context X. That +# implies that using X we might only use A B but we could also use A B C D to +# resolve the conflict. Input A B C D could predict alternative 1 in one position +# in the input and A B C E could predict alternative 2 in another position in +# the input. The conflicting SLL configurations could still be non-unique in the +# full context prediction, which would lead us to requiring more input than the +# original A B C. To make a prediction cache work, we have to track the exact +# input used during the previous prediction. That amounts to a cache that maps +# X to a specific DFA for that context.

+# +#

+# Something should be done for left-recursive expression predictions. They are +# likely LL(1) + pred eval. It is easier to do the whole "SLL unless error, then +# retry with full LL" approach that Sam uses.

+# +#

+# AVOIDING FULL CONTEXT PREDICTION

+# +#

+# We avoid doing the full context retry when the outer context is empty, when we +# did not dip into the outer context by falling off the end of the decision state +# rule, or when we force SLL mode.

+# +#

+# As an example of the "does not dip into the outer context" case, consider super +# constructor calls versus function calls. One grammar might look like +# this:

+# +#
+# ctorBody
+#   : '{' superCall? stat* '}'
+#   ;
+# 
+# +#

+# Or, you might see something like

+# +#
+# stat
+#   : superCall ';'
+#   | expression ';'
+#   | ...
+#   ;
+# 
+# +#

+# In both cases I believe that no closure operations will dip into the outer +# context. In the first case ctorBody in the worst case will stop at the '}'. +# In the 2nd case it should stop at the ';'. Both cases should stay within the +# entry rule and not dip into the outer context.

+# +#

+# PREDICATES

+# +#

+# Predicates, if present, are always evaluated in both SLL and LL. SLL and +# LL simulation deal with predicates differently, however. SLL collects predicates as +# it performs closure operations, like ANTLR v3 did. It delays predicate +# evaluation until it reaches an accept state. This allows us to cache the SLL +# ATN simulation whereas, if we had evaluated predicates on-the-fly during +# closure, the DFA state configuration sets would be different and we couldn't +# build up a suitable DFA.

+# +#

+# When building a DFA accept state during ATN simulation, we evaluate any +# predicates and return the sole semantically valid alternative. If there is +# more than 1 alternative, we report an ambiguity. If there are 0 alternatives, +# we throw an exception. Alternatives without predicates act like they have +# true predicates. The simple way to think about it is to strip away all +# alternatives with false predicates and choose the minimum alternative that +# remains.
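+#
+# Worked example (illustrative): suppose a decision has alts {1, 2, 3} with
+# predicates {p1}? == false, no predicate, and {p3}? == true respectively.
+# Alt 1 is stripped, alt 2 behaves as if its predicate were true, so {2, 3}
+# remain and we predict min({2, 3}) = 2.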

+# +#

+# When we start in the DFA and reach an accept state that's predicated, we test +# those and return the minimum semantically viable alternative. If no +# alternatives are viable, we throw an exception.

+# +#

+# During full LL ATN simulation, closure always evaluates predicates +# on-the-fly. This is crucial to reducing the configuration set size during +# closure. Without this on-the-fly evaluation it hits a landmine when parsing +# the Java grammar, for example.

+# +#

+# SHARING DFA

+# +#

+# All instances of the same parser share the same decision DFAs through a +# static field. Each instance gets its own ATN simulator but they share the +# same {@link #decisionToDFA} field. They also share a +# {@link PredictionContextCache} object that makes sure that all +# {@link PredictionContext} objects are shared among the DFA states. This makes +# a big size difference.
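+#
+# For example (illustrative; assumes the usual generated Python target, where
+# MyParser is a hypothetical generated parser whose module exposes a shared
+# decisionsToDFA list):
+#
+#   p1 = MyParser(tokens1)
+#   p2 = MyParser(tokens2)
+#   assert p1._interp.decisionToDFA is p2._interp.decisionToDFA  # shared cache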

+# +#

+# THREAD SAFETY

+# +#

+# The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when +# it adds a new DFA object to that array. {@link #addDFAEdge} +# locks on the DFA for the current decision when setting the +# {@link DFAState#edges} field. {@link #addDFAState} locks on +# the DFA for the current decision when looking up a DFA state to see if it +# already exists. We must make sure that all requests to add DFA states that +# are equivalent result in the same shared DFA object. This is because lots of +# threads will be trying to update the DFA at once. The +# {@link #addDFAState} method also locks inside the DFA lock +# but this time on the shared context cache when it rebuilds the +# configurations' {@link PredictionContext} objects using cached +# subgraphs/nodes. No other locking occurs, even during DFA simulation. This is +# safe as long as we can guarantee that all threads referencing +# {@code s.edge[t]} get the same physical target {@link DFAState}, or +# {@code null}. Once into the DFA, the DFA simulation does not reference the +# {@link DFA#states} map. It follows the {@link DFAState#edges} field to new +# targets. The DFA simulator will either find {@link DFAState#edges} to be +# {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or +# {@code dfa.edges[t]} to be non-null. The +# {@link #addDFAEdge} method could be racing to set the field +# but in either case the DFA simulator works; if it finds {@code null}, it requests ATN +# simulation. It could also race trying to get {@code dfa.edges[t]}, but either +# way it will work because it's not doing a test-and-set operation.

+# +#

+# Starting with SLL then failing over to combined SLL/LL (Two-Stage +# Parsing)

+# +#

+# Sam pointed out that if SLL does not give a syntax error, then there is no +# point in doing full LL, which is slower. We only have to try LL if we get a +# syntax error. For maximum speed, Sam starts the parser set to pure SLL +# mode with the {@link BailErrorStrategy}:

+# +#
+# parser.{@link Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
+# parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
+# 
+# +#

+# If it does not get a syntax error, then we're done. If it does get a syntax +# error, we need to retry with the combined SLL/LL strategy.
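+#
+# The same two-stage idea in the Python runtime (illustrative sketch;
+# MyLexer, MyParser, and startRule are hypothetical generated names):
+#
+#   from antlr4 import InputStream, CommonTokenStream
+#   from antlr4.atn.PredictionMode import PredictionMode
+#   from antlr4.error.ErrorStrategy import BailErrorStrategy, DefaultErrorStrategy
+#   from antlr4.error.Errors import ParseCancellationException
+#
+#   tokens = CommonTokenStream(MyLexer(InputStream(text)))
+#   parser = MyParser(tokens)
+#   parser._interp.predictionMode = PredictionMode.SLL
+#   parser._errHandler = BailErrorStrategy()
+#   try:
+#       tree = parser.startRule()              # fast SLL attempt, bails on error
+#   except ParseCancellationException:
+#       tokens.seek(0)                         # rewind the token stream
+#       parser.reset()
+#       parser._interp.predictionMode = PredictionMode.LL
+#       parser._errHandler = DefaultErrorStrategy()
+#       tree = parser.startRule()              # full LL pass, reports real errors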

+# +#

+# The reason this works is as follows. If there are no SLL conflicts, then the +# grammar is SLL (at least for that input set). If there is an SLL conflict, +# the full LL analysis must yield a set of viable alternatives which is a +# subset of the alternatives reported by SLL. If the LL set is a singleton, +# then the grammar is LL but not SLL. If the LL set is the same size as the SLL +# set, the decision is SLL. If the LL set has size > 1, then that decision +# is truly ambiguous on the current input. If the LL set is smaller, then the +# SLL conflict resolution might choose an alternative that the full LL would +# rule out as a possibility based upon better context information. If that's +# the case, then the SLL parse will definitely get an error because the full LL +# analysis says it's not viable. If SLL conflict resolution chooses an +# alternative within the LL set, then both SLL and LL would choose the same +# alternative because they both choose the minimum of multiple conflicting +# alternatives.

+# +#

+# Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and +# a smaller LL set called s. If s is {@code {2, 3}}, then SLL +# parsing will get an error because SLL will pursue alternative 1. If +# s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will +# choose the same alternative because alternative one is the minimum of either +# set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax +# error. If s is {@code {1}} then SLL will succeed.

+# +#

+# Of course, if the input is invalid, then we will get an error for sure in +# both SLL and LL parsing. Erroneous input will therefore require 2 passes over +# the input.

+# +import sys +from antlr4 import DFA +from antlr4.PredictionContext import PredictionContextCache, PredictionContext, SingletonPredictionContext, \ + PredictionContextFromRuleContext +from antlr4.BufferedTokenStream import TokenStream +from antlr4.Parser import Parser +from antlr4.ParserRuleContext import ParserRuleContext +from antlr4.RuleContext import RuleContext +from antlr4.Token import Token +from antlr4.Utils import str_list +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import ATNConfig +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.atn.ATNSimulator import ATNSimulator +from antlr4.atn.ATNState import StarLoopEntryState, DecisionState, RuleStopState, ATNState +from antlr4.atn.PredictionMode import PredictionMode +from antlr4.atn.SemanticContext import SemanticContext, AND, andContext, orContext +from antlr4.atn.Transition import Transition, RuleTransition, ActionTransition, PrecedencePredicateTransition, \ + PredicateTransition, AtomTransition, SetTransition, NotSetTransition +from antlr4.dfa.DFAState import DFAState, PredPrediction +from antlr4.error.Errors import NoViableAltException + + +class ParserATNSimulator(ATNSimulator): + __slots__ = ( + 'parser', 'decisionToDFA', 'predictionMode', '_input', '_startIndex', + '_outerContext', '_dfa', 'mergeCache' + ) + + debug = False + debug_list_atn_decisions = False + dfa_debug = False + retry_debug = False + + + def __init__(self, parser:Parser, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache): + super().__init__(atn, sharedContextCache) + self.parser = parser + self.decisionToDFA = decisionToDFA + # SLL, LL, or LL + exact ambig detection?# + self.predictionMode = PredictionMode.LL + # LAME globals to avoid parameters!!!!! I need these down deep in predTransition + self._input = None + self._startIndex = 0 + self._outerContext = None + self._dfa = None + # Each prediction operation uses a cache for merge of prediction contexts. + # Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + # isn't synchronized but we're ok since two threads shouldn't reuse same + # parser/atnsim object because it can only handle one input at a time. + # This maps graphs a and b to merged result c. (a,b)→c. We can avoid + # the merge if we ever see a and b again. Note that (b,a)→c should + # also be examined during cache lookup. + # + self.mergeCache = None + + + def reset(self): + pass + + def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext): + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("adaptivePredict decision " + str(decision) + + " exec LA(1)==" + self.getLookaheadName(input) + + " line " + str(input.LT(1).line) + ":" + + str(input.LT(1).column)) + self._input = input + self._startIndex = input.index + self._outerContext = outerContext + + dfa = self.decisionToDFA[decision] + self._dfa = dfa + m = input.mark() + index = input.index + + # Now we are certain to have a specific decision's DFA + # But, do we still need an initial state? + try: + if dfa.precedenceDfa: + # the start state for a precedence DFA depends on the current + # parser precedence, and is provided by a DFA method. 
+ s0 = dfa.getPrecedenceStartState(self.parser.getPrecedence()) + else: + # the start state for a "regular" DFA is just s0 + s0 = dfa.s0 + + if s0 is None: + if outerContext is None: + outerContext = ParserRuleContext.EMPTY + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("predictATN decision " + str(dfa.decision) + + " exec LA(1)==" + self.getLookaheadName(input) + + ", outerContext=" + outerContext.toString(self.parser.literalNames, None)) + + fullCtx = False + s0_closure = self.computeStartState(dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx) + + if dfa.precedenceDfa: + # If this is a precedence DFA, we use applyPrecedenceFilter + # to convert the computed start state to a precedence start + # state. We then use DFA.setPrecedenceStartState to set the + # appropriate start state for the precedence level rather + # than simply setting DFA.s0. + # + dfa.s0.configs = s0_closure # not used for prediction but useful to know start configs anyway + s0_closure = self.applyPrecedenceFilter(s0_closure) + s0 = self.addDFAState(dfa, DFAState(configs=s0_closure)) + dfa.setPrecedenceStartState(self.parser.getPrecedence(), s0) + else: + s0 = self.addDFAState(dfa, DFAState(configs=s0_closure)) + dfa.s0 = s0 + + alt = self.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulator.debug: + print("DFA after predictATN: " + dfa.toString(self.parser.literalNames)) + return alt + finally: + self._dfa = None + self.mergeCache = None # wack cache after each prediction + input.seek(index) + input.release(m) + + # Performs ATN simulation to compute a predicted alternative based + # upon the remaining input, but also updates the DFA cache to avoid + # having to traverse the ATN again for the same input sequence. + + # There are some key conditions we're looking for after computing a new + # set of ATN configs (proposed DFA state): + # if the set is empty, there is no viable alternative for current symbol + # does the state uniquely predict an alternative? + # does the state have a conflict that would prevent us from + # putting it on the work list? + + # We also have some key operations to do: + # add an edge from previous DFA state to potentially new DFA state, D, + # upon current symbol but only if adding to work list, which means in all + # cases except no viable alternative (and possibly non-greedy decisions?) 
+ # collecting predicates and adding semantic context to DFA accept states + # adding rule context to context-sensitive DFA accept states + # consuming an input symbol + # reporting a conflict + # reporting an ambiguity + # reporting a context sensitivity + # reporting insufficient predicates + + # cover these cases: + # dead end + # single alt + # single alt + preds + # conflict + # conflict + preds + # + def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ): + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("execATN decision " + str(dfa.decision) + + " exec LA(1)==" + self.getLookaheadName(input) + + " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column)) + + previousD = s0 + + if ParserATNSimulator.debug: + print("s0 = " + str(s0)) + + t = input.LA(1) + + while True: # while more work + D = self.getExistingTargetState(previousD, t) + if D is None: + D = self.computeTargetState(dfa, previousD, t) + if D is self.ERROR: + # if any configs in previous dipped into outer context, that + # means that input up to t actually finished entry rule + # at least for SLL decision. Full LL doesn't dip into outer + # so don't need special case. + # We will get an error no matter what so delay until after + # decision; better error message. Also, no reachable target + # ATN states in SLL implies LL will also get nowhere. + # If conflict in states that dip out, choose min since we + # will get error no matter what. + e = self.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.seek(startIndex) + alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt!=ATN.INVALID_ALT_NUMBER: + return alt + raise e + + if D.requiresFullContext and self.predictionMode != PredictionMode.SLL: + # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts = D.configs.conflictingAlts + if D.predicates is not None: + if ParserATNSimulator.debug: + print("DFA state has preds in DFA sim LL failover") + conflictIndex = input.index + if conflictIndex != startIndex: + input.seek(startIndex) + + conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True) + if len(conflictingAlts)==1: + if ParserATNSimulator.debug: + print("Full LL avoided") + return min(conflictingAlts) + + if conflictIndex != startIndex: + # restore the index so reporting the fallback to full + # context occurs with the index at the correct spot + input.seek(conflictIndex) + + if ParserATNSimulator.dfa_debug: + print("ctx sensitive state " + str(outerContext) +" in " + str(D)) + fullCtx = True + s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx) + self.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index) + alt = self.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext) + return alt + + if D.isAcceptState: + if D.predicates is None: + return D.prediction + + stopIndex = input.index + input.seek(startIndex) + alts = self.evalSemanticContext(D.predicates, outerContext, True) + if len(alts)==0: + raise self.noViableAlt(input, outerContext, D.configs, startIndex) + elif len(alts)==1: + return min(alts) + else: + # report ambiguity after predicate evaluation to make sure the correct + # set of ambig alts is reported. 
+ self.reportAmbiguity(dfa, D, startIndex, stopIndex, False, alts, D.configs) + return min(alts) + + previousD = D + + if t != Token.EOF: + input.consume() + t = input.LA(1) + + # + # Get an existing target state for an edge in the DFA. If the target state + # for the edge has not yet been computed or is otherwise not available, + # this method returns {@code null}. + # + # @param previousD The current DFA state + # @param t The next input symbol + # @return The existing target DFA state for the given input symbol + # {@code t}, or {@code null} if the target state for this edge is not + # already cached + # + def getExistingTargetState(self, previousD:DFAState, t:int): + edges = previousD.edges + if edges is None or t + 1 < 0 or t + 1 >= len(edges): + return None + else: + return edges[t + 1] + + # + # Compute a target state for an edge in the DFA, and attempt to add the + # computed state and corresponding edge to the DFA. + # + # @param dfa The DFA + # @param previousD The current DFA state + # @param t The next input symbol + # + # @return The computed target DFA state for the given input symbol + # {@code t}. If {@code t} does not lead to a valid DFA state, this method + # returns {@link #ERROR}. + # + def computeTargetState(self, dfa:DFA, previousD:DFAState, t:int): + reach = self.computeReachSet(previousD.configs, t, False) + if reach is None: + self.addDFAEdge(dfa, previousD, t, self.ERROR) + return self.ERROR + + # create new target state; we'll add to DFA after it's complete + D = DFAState(configs=reach) + + predictedAlt = self.getUniqueAlt(reach) + + if ParserATNSimulator.debug: + altSubSets = PredictionMode.getConflictingAltSubsets(reach) + print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) + + ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" + + str(PredictionMode.allSubsetsConflict(altSubSets)) + ", conflictingAlts=" + + str(self.getConflictingAlts(reach))) + + if predictedAlt!=ATN.INVALID_ALT_NUMBER: + # NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = True + D.configs.uniqueAlt = predictedAlt + D.prediction = predictedAlt + elif PredictionMode.hasSLLConflictTerminatingPrediction(self.predictionMode, reach): + # MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = self.getConflictingAlts(reach) + D.requiresFullContext = True + # in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = True + D.prediction = min(D.configs.conflictingAlts) + + if D.isAcceptState and D.configs.hasSemanticContext: + self.predicateDFAState(D, self.atn.getDecisionState(dfa.decision)) + if D.predicates is not None: + D.prediction = ATN.INVALID_ALT_NUMBER + + # all adds to dfa are done after we've created full D state + D = self.addDFAEdge(dfa, previousD, t, D) + return D + + def predicateDFAState(self, dfaState:DFAState, decisionState:DecisionState): + # We need to test all predicates, even in DFA states that + # uniquely predict alternative. 
+ nalts = len(decisionState.transitions) + # Update DFA so reach becomes accept state with (predicate,alt) + # pairs if preds found for conflicting alts + altsToCollectPredsFrom = self.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred = self.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred is not None: + dfaState.predicates = self.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.prediction = ATN.INVALID_ALT_NUMBER # make sure we use preds + else: + # There are preds in configs but they might go away + # when OR'd together like {p}? || NONE == NONE. If neither + # alt has preds, resolve to min alt + dfaState.prediction = min(altsToCollectPredsFrom) + + # comes back with reach.uniqueAlt set to a valid alt + def execATNWithFullContext(self, dfa:DFA, D:DFAState, # how far we got before failing over + s0:ATNConfigSet, + input:TokenStream, + startIndex:int, + outerContext:ParserRuleContext): + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("execATNWithFullContext", str(s0)) + fullCtx = True + foundExactAmbig = False + reach = None + previous = s0 + input.seek(startIndex) + t = input.LA(1) + predictedAlt = -1 + while (True): # while more work + reach = self.computeReachSet(previous, t, fullCtx) + if reach is None: + # if any configs in previous dipped into outer context, that + # means that input up to t actually finished entry rule + # at least for LL decision. Full LL doesn't dip into outer + # so don't need special case. + # We will get an error no matter what so delay until after + # decision; better error message. Also, no reachable target + # ATN states in SLL implies LL will also get nowhere. + # If conflict in states that dip out, choose min since we + # will get error no matter what. + e = self.noViableAlt(input, outerContext, previous, startIndex) + input.seek(startIndex) + alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt!=ATN.INVALID_ALT_NUMBER: + return alt + else: + raise e + + altSubSets = PredictionMode.getConflictingAltSubsets(reach) + if ParserATNSimulator.debug: + print("LL altSubSets=" + str(altSubSets) + ", predict=" + + str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + str(PredictionMode.resolvesToJustOneViableAlt(altSubSets))) + + reach.uniqueAlt = self.getUniqueAlt(reach) + # unique prediction? + if reach.uniqueAlt!=ATN.INVALID_ALT_NUMBER: + predictedAlt = reach.uniqueAlt + break + elif self.predictionMode is not PredictionMode.LL_EXACT_AMBIG_DETECTION: + predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATN.INVALID_ALT_NUMBER: + break + else: + # In exact ambiguity mode, we never try to terminate early. + # Just keeps scarfing until we know what the conflict is + if PredictionMode.allSubsetsConflict(altSubSets) and PredictionMode.allSubsetsEqual(altSubSets): + foundExactAmbig = True + predictedAlt = PredictionMode.getSingleViableAlt(altSubSets) + break + # else there are multiple non-conflicting subsets or + # we're not sure what the ambiguity is yet. + # So, keep going. + + previous = reach + if t != Token.EOF: + input.consume() + t = input.LA(1) + + # If the configuration set uniquely predicts an alternative, + # without conflict, then we know that it's a full LL decision + # not SLL. 
+ if reach.uniqueAlt != ATN.INVALID_ALT_NUMBER : + self.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index) + return predictedAlt + + # We do not check predicates here because we have checked them + # on-the-fly when doing full context prediction. + + # + # In non-exact ambiguity detection mode, we might actually be able to + # detect an exact ambiguity, but I'm not going to spend the cycles + # needed to check. We only emit ambiguity warnings in exact ambiguity + # mode. + # + # For example, we might know that we have conflicting configurations. + # But, that does not mean that there is no way forward without a + # conflict. It's possible to have nonconflicting alt subsets as in: + + # altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + # from + # + # [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + # (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + # + # In this case, (17,1,[5 $]) indicates there is some next sequence that + # would resolve this without conflict to alternative 1. Any other viable + # next sequence, however, is associated with a conflict. We stop + # looking for input because no amount of further lookahead will alter + # the fact that we should predict alternative 1. We just can't say for + # sure that there is an ambiguity without looking further. + + self.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, None, reach) + + return predictedAlt + + def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool): + if ParserATNSimulator.debug: + print("in computeReachSet, starting closure: " + str(closure)) + + if self.mergeCache is None: + self.mergeCache = dict() + + intermediate = ATNConfigSet(fullCtx) + + # Configurations already in a rule stop state indicate reaching the end + # of the decision rule (local context) or end of the start rule (full + # context). Once reached, these configurations are never updated by a + # closure operation, so they are handled separately for the performance + # advantage of having a smaller intermediate set when calling closure. + # + # For full-context reach operations, separate handling is required to + # ensure that the alternative matching the longest overall sequence is + # chosen when multiple such configurations can match the input. + + skippedStopStates = None + + # First figure out where we can reach on input t + for c in closure: + if ParserATNSimulator.debug: + print("testing " + self.getTokenName(t) + " at " + str(c)) + + if isinstance(c.state, RuleStopState): + if fullCtx or t == Token.EOF: + if skippedStopStates is None: + skippedStopStates = list() + skippedStopStates.append(c) + continue + + for trans in c.state.transitions: + target = self.getReachableTarget(trans, t) + if target is not None: + intermediate.add(ATNConfig(state=target, config=c), self.mergeCache) + + # Now figure out where the reach operation can take us... + + reach = None + + # This block optimizes the reach operation for intermediate sets which + # trivially indicate a termination state for the overall + # adaptivePredict operation. + # + # The conditions assume that intermediate + # contains all configurations relevant to the reach set, but this + # condition is not true when one or more configurations have been + # withheld in skippedStopStates, or when the current symbol is EOF. + # + if skippedStopStates is None and t!=Token.EOF: + if len(intermediate)==1: + # Don't pursue the closure if there is just one state. 
+ # It can only have one alternative; just add to result + # Also don't pursue the closure if there is unique alternative + # among the configurations. + reach = intermediate + elif self.getUniqueAlt(intermediate)!=ATN.INVALID_ALT_NUMBER: + # Also don't pursue the closure if there is unique alternative + # among the configurations. + reach = intermediate + + # If the reach set could not be trivially determined, perform a closure + # operation on the intermediate set to compute its initial value. + # + if reach is None: + reach = ATNConfigSet(fullCtx) + closureBusy = set() + treatEofAsEpsilon = t == Token.EOF + for c in intermediate: + self.closure(c, reach, closureBusy, False, fullCtx, treatEofAsEpsilon) + + if t == Token.EOF: + # After consuming EOF no additional input is possible, so we are + # only interested in configurations which reached the end of the + # decision rule (local context) or end of the start rule (full + # context). Update reach to contain only these configurations. This + # handles both explicit EOF transitions in the grammar and implicit + # EOF transitions following the end of the decision or start rule. + # + # When reach==intermediate, no closure operation was performed. In + # this case, removeAllConfigsNotInRuleStopState needs to check for + # reachable rule stop states as well as configurations already in + # a rule stop state. + # + # This is handled before the configurations in skippedStopStates, + # because any configurations potentially added from that list are + # already guaranteed to meet this condition whether or not it's + # required. + # + reach = self.removeAllConfigsNotInRuleStopState(reach, reach is intermediate) + + # If skippedStopStates is not null, then it contains at least one + # configuration. For full-context reach operations, these + # configurations reached the end of the start rule, in which case we + # only add them back to reach if no configuration during the current + # closure operation reached such a state. This ensures adaptivePredict + # chooses an alternative matching the longest overall sequence when + # multiple alternatives are viable. + # + if skippedStopStates is not None and ( (not fullCtx) or (not PredictionMode.hasConfigInRuleStopState(reach))): + for c in skippedStopStates: + reach.add(c, self.mergeCache) + if len(reach)==0: + return None + else: + return reach + + # + # Return a configuration set containing only the configurations from + # {@code configs} which are in a {@link RuleStopState}. If all + # configurations in {@code configs} are already in a rule stop state, this + # method simply returns {@code configs}. + # + #

When {@code lookToEndOfRule} is true, this method uses + # {@link ATN#nextTokens} for each configuration in {@code configs} which is + # not already in a rule stop state to see if a rule stop state is reachable + # from the configuration via epsilon-only transitions.
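A minimal sketch, not part of this runtime file, of the lookToEndOfRule test on its own, assuming only Token.EPSILON and ATN.nextTokens from this runtime; can_fall_off_rule is a hypothetical helper name:

    from antlr4.Token import Token

    def can_fall_off_rule(atn, state):
        # EPSILON in nextTokens(state) means a rule stop state is reachable
        # from state via epsilon-only transitions
        return state.epsilonOnlyTransitions and Token.EPSILON in atn.nextTokens(state)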

+ # + # @param configs the configuration set to update + # @param lookToEndOfRule when true, this method checks for rule stop states + # reachable by epsilon-only transitions from each configuration in + # {@code configs}. + # + # @return {@code configs} if all configurations in {@code configs} are in a + # rule stop state, otherwise return a new configuration set containing only + # the configurations from {@code configs} which are in a rule stop state + # + def removeAllConfigsNotInRuleStopState(self, configs:ATNConfigSet, lookToEndOfRule:bool): + if PredictionMode.allConfigsInRuleStopStates(configs): + return configs + result = ATNConfigSet(configs.fullCtx) + for config in configs: + if isinstance(config.state, RuleStopState): + result.add(config, self.mergeCache) + continue + if lookToEndOfRule and config.state.epsilonOnlyTransitions: + nextTokens = self.atn.nextTokens(config.state) + if Token.EPSILON in nextTokens: + endOfRuleState = self.atn.ruleToStopState[config.state.ruleIndex] + result.add(ATNConfig(state=endOfRuleState, config=config), self.mergeCache) + return result + + def computeStartState(self, p:ATNState, ctx:RuleContext, fullCtx:bool): + # always at least the implicit call to start rule + initialContext = PredictionContextFromRuleContext(self.atn, ctx) + configs = ATNConfigSet(fullCtx) + + for i in range(0, len(p.transitions)): + target = p.transitions[i].target + c = ATNConfig(target, i+1, initialContext) + closureBusy = set() + self.closure(c, configs, closureBusy, True, fullCtx, False) + return configs + + # + # This method transforms the start state computed by + # {@link #computeStartState} to the special start state used by a + # precedence DFA for a particular precedence value. The transformation + # process applies the following changes to the start state's configuration + # set. + # + #
+ #
+ # 1. Evaluate the precedence predicates for each configuration using
+ # {@link SemanticContext#evalPrecedence}.
+ # 2. Remove all configurations which predict an alternative greater than
+ # 1, for which another configuration that predicts alternative 1 is in the
+ # same ATN state with the same prediction context. This transformation is
+ # valid for the following reasons:
+ #
+ # The prediction context must be considered by this filter to address
+ # situations like the following.
+ #
+    # grammar TA;
+    # prog: statement* EOF;
+    # statement: letterA | statement letterA 'b' ;
+    # letterA: 'a';
+ #

+ # In the above grammar, the ATN state immediately before the token
+ # reference {@code 'a'} in {@code letterA} is reachable from the left edge
+ # of both the primary and closure blocks of the left-recursive rule
+ # {@code statement}. The prediction context associated with each of these
+ # configurations distinguishes between them, and prevents the alternative
+ # which stepped out to {@code prog} (and then back into {@code statement})
+ # from being eliminated by the filter.
+ #
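A condensed sketch, not part of this runtime file, of the filter described above, leaving out the predicate-evaluation step; precedence_filter is a hypothetical stand-in for applyPrecedenceFilter below, over objects with the state/context/alt attributes used throughout this file:

    def precedence_filter(configs):
        # record the alt-1 prediction context seen at each ATN state
        alt1_ctx = {c.state.stateNumber: c.context for c in configs if c.alt == 1}
        kept = [c for c in configs if c.alt == 1]
        # an alt>1 config survives only if its context differs from the
        # alt-1 context recorded for the same ATN state
        kept += [c for c in configs
                 if c.alt > 1 and alt1_ctx.get(c.state.stateNumber) != c.context]
        return kept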

+ # + # @param configs The configuration set computed by + # {@link #computeStartState} as the start state for the DFA. + # @return The transformed configuration set representing the start state + # for a precedence DFA at a particular precedence level (determined by + # calling {@link Parser#getPrecedence}). + # + def applyPrecedenceFilter(self, configs:ATNConfigSet): + statesFromAlt1 = dict() + configSet = ATNConfigSet(configs.fullCtx) + for config in configs: + # handle alt 1 first + if config.alt != 1: + continue + updatedContext = config.semanticContext.evalPrecedence(self.parser, self._outerContext) + if updatedContext is None: + # the configuration was eliminated + continue + + statesFromAlt1[config.state.stateNumber] = config.context + if updatedContext is not config.semanticContext: + configSet.add(ATNConfig(config=config, semantic=updatedContext), self.mergeCache) + else: + configSet.add(config, self.mergeCache) + + for config in configs: + if config.alt == 1: + # already handled + continue + + # In the future, this elimination step could be updated to also + # filter the prediction context for alternatives predicting alt>1 + # (basically a graph subtraction algorithm). + # + if not config.precedenceFilterSuppressed: + context = statesFromAlt1.get(config.state.stateNumber, None) + if context==config.context: + # eliminated + continue + + configSet.add(config, self.mergeCache) + + return configSet + + def getReachableTarget(self, trans:Transition, ttype:int): + if trans.matches(ttype, 0, self.atn.maxTokenType): + return trans.target + else: + return None + + def getPredsForAmbigAlts(self, ambigAlts:set, configs:ATNConfigSet, nalts:int): + # REACH=[1|1|[]|0:0, 1|2|[]|0:1] + # altToPred starts as an array of all null contexts. The entry at index i + # corresponds to alternative i. altToPred[i] may have one of three values: + # 1. null: no ATNConfig c is found such that c.alt==i + # 2. SemanticContext.NONE: At least one ATNConfig c exists such that + # c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + # alt i has at least one unpredicated config. + # 3. Non-NONE Semantic Context: There exists at least one, and for all + # ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + # + # From this, it is clear that NONE||anything==NONE. + # + altToPred = [None] * (nalts + 1) + for c in configs: + if c.alt in ambigAlts: + altToPred[c.alt] = orContext(altToPred[c.alt], c.semanticContext) + + nPredAlts = 0 + for i in range(1, nalts+1): + if altToPred[i] is None: + altToPred[i] = SemanticContext.NONE + elif altToPred[i] is not SemanticContext.NONE: + nPredAlts += 1 + + # nonambig alts are null in altToPred + if nPredAlts==0: + altToPred = None + if ParserATNSimulator.debug: + print("getPredsForAmbigAlts result " + str_list(altToPred)) + return altToPred + + def getPredicatePredictions(self, ambigAlts:set, altToPred:list): + pairs = [] + containsPredicate = False + for i in range(1, len(altToPred)): + pred = altToPred[i] + # unpredicated is indicated by SemanticContext.NONE + if ambigAlts is not None and i in ambigAlts: + pairs.append(PredPrediction(pred, i)) + if pred is not SemanticContext.NONE: + containsPredicate = True + + if not containsPredicate: + return None + + return pairs + + # + # This method is used to improve the localization of error messages by + # choosing an alternative rather than throwing a + # {@link NoViableAltException} in particular prediction scenarios where the + # {@link #ERROR} state was reached during ATN simulation. + # + #

+ # The default implementation of this method uses the following + # algorithm to identify an ATN configuration which successfully parsed the + # decision entry rule. Choosing such an alternative ensures that the + # {@link ParserRuleContext} returned by the calling rule will be complete + # and valid, and the syntax error will be reported later at a more + # localized location.

+ #
+ # 1. If a syntactically valid path or paths reach the end of the decision
+ # rule, and they are semantically valid if predicated, return the minimum
+ # associated alt.
+ # 2. Else, if a semantically invalid but syntactically valid path or paths
+ # exist, return the minimum associated alt.
+ # 3. Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.
+ #

+ # In some scenarios, the algorithm described above could predict an + # alternative which will result in a {@link FailedPredicateException} in + # the parser. Specifically, this could occur if the only configuration + # capable of successfully parsing to the end of the decision rule is + # blocked by a semantic predicate. By choosing this alternative within + # {@link #adaptivePredict} instead of throwing a + # {@link NoViableAltException}, the resulting + # {@link FailedPredicateException} in the parser will identify the specific + # predicate which is preventing the parser from successfully parsing the + # decision rule, which helps developers identify and correct logic errors + # in semantic predicates. + #

+ # + # @param configs The ATN configurations which were valid immediately before + # the {@link #ERROR} state was reached + # @param outerContext The is the \gamma_0 initial parser context from the paper + # or the parser stack at the instant before prediction commences. + # + # @return The value to return from {@link #adaptivePredict}, or + # {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not + # identified and {@link #adaptivePredict} should report an error instead. + # + def getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet, outerContext:ParserRuleContext): + semValidConfigs, semInvalidConfigs = self.splitAccordingToSemanticValidity(configs, outerContext) + alt = self.getAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt!=ATN.INVALID_ALT_NUMBER: # semantically/syntactically viable path exists + return alt + # Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs)>0: + alt = self.getAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt!=ATN.INVALID_ALT_NUMBER: # syntactically viable path exists + return alt + return ATN.INVALID_ALT_NUMBER + + def getAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet): + alts = set() + for c in configs: + if c.reachesIntoOuterContext>0 or (isinstance(c.state, RuleStopState) and c.context.hasEmptyPath() ): + alts.add(c.alt) + if len(alts)==0: + return ATN.INVALID_ALT_NUMBER + else: + return min(alts) + + # Walk the list of configurations and split them according to + # those that have preds evaluating to true/false. If no pred, assume + # true pred and include in succeeded set. Returns Pair of sets. + # + # Create a new set so as not to alter the incoming parameter. + # + # Assumption: the input stream has been restored to the starting point + # prediction, which is where predicates need to evaluate. + # + def splitAccordingToSemanticValidity(self, configs:ATNConfigSet, outerContext:ParserRuleContext): + succeeded = ATNConfigSet(configs.fullCtx) + failed = ATNConfigSet(configs.fullCtx) + for c in configs: + if c.semanticContext is not SemanticContext.NONE: + predicateEvaluationResult = c.semanticContext.eval(self.parser, outerContext) + if predicateEvaluationResult: + succeeded.add(c) + else: + failed.add(c) + else: + succeeded.add(c) + return (succeeded,failed) + + # Look through a list of predicate/alt pairs, returning alts for the + # pairs that win. A {@code NONE} predicate indicates an alt containing an + # unpredicated config which behaves as "always true." If !complete + # then we stop at the first predicate that evaluates to true. This + # includes pairs with null predicates. + # + def evalSemanticContext(self, predPredictions:list, outerContext:ParserRuleContext, complete:bool): + predictions = set() + for pair in predPredictions: + if pair.pred is SemanticContext.NONE: + predictions.add(pair.alt) + if not complete: + break + continue + predicateEvaluationResult = pair.pred.eval(self.parser, outerContext) + if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug: + print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult)) + + if predicateEvaluationResult: + if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug: + print("PREDICT " + str(pair.alt)) + predictions.add(pair.alt) + if not complete: + break + return predictions + + + # TODO: If we are doing predicates, there is no point in pursuing + # closure operations if we reach a DFA state that uniquely predicts + # alternative. 
We will not be caching that DFA state and it is a + # waste to pursue the closure. Might have to advance when we do + # ambig detection thought :( + # + + def closure(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, treatEofAsEpsilon:bool): + initialDepth = 0 + self.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEofAsEpsilon) + + + def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool): + if ParserATNSimulator.debug: + print("closure(" + str(config) + ")") + + if isinstance(config.state, RuleStopState): + # We hit rule end. If we have context info, use it + # run thru all possible stack tops in ctx + if not config.context.isEmpty(): + for i in range(0, len(config.context)): + state = config.context.getReturnState(i) + if state is PredictionContext.EMPTY_RETURN_STATE: + if fullCtx: + configs.add(ATNConfig(state=config.state, context=PredictionContext.EMPTY, config=config), self.mergeCache) + continue + else: + # we have no context info, just chase follow links (if greedy) + if ParserATNSimulator.debug: + print("FALLING off rule " + self.getRuleName(config.state.ruleIndex)) + self.closure_(config, configs, closureBusy, collectPredicates, + fullCtx, depth, treatEofAsEpsilon) + continue + returnState = self.atn.states[state] + newContext = config.context.getParent(i) # "pop" return state + c = ATNConfig(state=returnState, alt=config.alt, context=newContext, semantic=config.semanticContext) + # While we have context to pop back from, we may have + # gotten that context AFTER having falling off a rule. + # Make sure we track that we are now out of context. + c.reachesIntoOuterContext = config.reachesIntoOuterContext + self.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon) + return + elif fullCtx: + # reached end of start rule + configs.add(config, self.mergeCache) + return + else: + # else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulator.debug: + print("FALLING off rule " + self.getRuleName(config.state.ruleIndex)) + + self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) + + # Do the actual work of walking epsilon edges# + def closure_(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool): + p = config.state + # optimization + if not p.epsilonOnlyTransitions: + configs.add(config, self.mergeCache) + # make sure to not return here, because EOF transitions can act as + # both epsilon transitions and non-epsilon transitions. + + first = True + for t in p.transitions: + if first: + first = False + if self.canDropLoopEntryEdgeInLeftRecursiveRule(config): + continue + + continueCollecting = collectPredicates and not isinstance(t, ActionTransition) + c = self.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon) + if c is not None: + newDepth = depth + if isinstance( config.state, RuleStopState): + # target fell off end of rule; mark resulting c as having dipped into outer context + # We can't get here if incoming config was rule stop and we had context + # track how far we dip into outer context. Might + # come in handy and we avoid evaluating context dependent + # preds if this is > 0. 
+ if self._dfa is not None and self._dfa.precedenceDfa: + if t.outermostPrecedenceReturn == self._dfa.atnStartState.ruleIndex: + c.precedenceFilterSuppressed = True + c.reachesIntoOuterContext += 1 + if c in closureBusy: + # avoid infinite recursion for right-recursive rules + continue + closureBusy.add(c) + configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method + newDepth -= 1 + if ParserATNSimulator.debug: + print("dips into outer ctx: " + str(c)) + else: + if not t.isEpsilon: + if c in closureBusy: + # avoid infinite recursion for EOF* and EOF+ + continue + closureBusy.add(c) + if isinstance(t, RuleTransition): + # latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0: + newDepth += 1 + + self.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon) + + + + # Implements first-edge (loop entry) elimination as an optimization + # during closure operations. See antlr/antlr4#1398. + # + # The optimization is to avoid adding the loop entry config when + # the exit path can only lead back to the same + # StarLoopEntryState after popping context at the rule end state + # (traversing only epsilon edges, so we're still in closure, in + # this same rule). + # + # We need to detect any state that can reach loop entry on + # epsilon w/o exiting rule. We don't have to look at FOLLOW + # links, just ensure that all stack tops for config refer to key + # states in LR rule. + # + # To verify we are in the right situation we must first check + # closure is at a StarLoopEntryState generated during LR removal. + # Then we check that each stack top of context is a return state + # from one of these cases: + # + # 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state + # 2. expr op expr. The return state is the block end of internal block of (...)* + # 3. 'between' expr 'and' expr. The return state of 2nd expr reference. + # That state points at block end of internal block of (...)*. + # 4. expr '?' expr ':' expr. The return state points at block end, + # which points at loop entry state. + # + # If any is true for each stack top, then closure does not add a + # config to the current config set for edge[0], the loop entry branch. + # + # Conditions fail if any context for the current config is: + # + # a. empty (we'd fall out of expr to do a global FOLLOW which could + # even be to some weird spot in expr) or, + # b. lies outside of expr or, + # c. lies within expr but at a state not the BlockEndState + # generated during LR removal + # + # Do we need to evaluate predicates ever in closure for this case? + # + # No. Predicates, including precedence predicates, are only + # evaluated when computing a DFA start state. I.e., only before + # the lookahead (but not parser) consumes a token. + # + # There are no epsilon edges allowed in LR rule alt blocks or in + # the "primary" part (ID here). If closure is in + # StarLoopEntryState any lookahead operation will have consumed a + # token as there are no epsilon-paths that lead to + # StarLoopEntryState. We do not have to evaluate predicates + # therefore if we are in the generated StarLoopEntryState of a LR + # rule. Note that when making a prediction starting at that + # decision point, decision d=2, compute-start-state performs + # closure starting at edges[0], edges[1] emanating from + # StarLoopEntryState. 
That means it is not performing closure on + # StarLoopEntryState during compute-start-state. + # + # How do we know this always gives same prediction answer? + # + # Without predicates, loop entry and exit paths are ambiguous + # upon remaining input +b (in, say, a+b). Either paths lead to + # valid parses. Closure can lead to consuming + immediately or by + # falling out of this call to expr back into expr and loop back + # again to StarLoopEntryState to match +b. In this special case, + # we choose the more efficient path, which is to take the bypass + # path. + # + # The lookahead language has not changed because closure chooses + # one path over the other. Both paths lead to consuming the same + # remaining input during a lookahead operation. If the next token + # is an operator, lookahead will enter the choice block with + # operators. If it is not, lookahead will exit expr. Same as if + # closure had chosen to enter the choice block immediately. + # + # Closure is examining one config (some loopentrystate, some alt, + # context) which means it is considering exactly one alt. Closure + # always copies the same alt to any derived configs. + # + # How do we know this optimization doesn't mess up precedence in + # our parse trees? + # + # Looking through expr from left edge of stat only has to confirm + # that an input, say, a+b+c; begins with any valid interpretation + # of an expression. The precedence actually doesn't matter when + # making a decision in stat seeing through expr. It is only when + # parsing rule expr that we must use the precedence to get the + # right interpretation and, hence, parse tree. + # + # @since 4.6 + # + def canDropLoopEntryEdgeInLeftRecursiveRule(self, config): + # return False + p = config.state + # First check to see if we are in StarLoopEntryState generated during + # left-recursion elimination. For efficiency, also check if + # the context has an empty stack case. If so, it would mean + # global FOLLOW so we can't perform optimization + # Are we the special loop entry/exit state? or SLL wildcard + if p.stateType != ATNState.STAR_LOOP_ENTRY \ + or not p.isPrecedenceDecision \ + or config.context.isEmpty() \ + or config.context.hasEmptyPath(): + return False + + # Require all return states to return back to the same rule + # that p is in. + numCtxs = len(config.context) + for i in range(0, numCtxs): # for each stack context + returnState = self.atn.states[config.context.getReturnState(i)] + if returnState.ruleIndex != p.ruleIndex: + return False + + decisionStartState = p.transitions[0].target + blockEndStateNum = decisionStartState.endState.stateNumber + blockEndState = self.atn.states[blockEndStateNum] + + # Verify that the top of each stack context leads to loop entry/exit + # state through epsilon edges and w/o leaving rule. 
+ for i in range(0, numCtxs): # for each stack context + returnStateNumber = config.context.getReturnState(i) + returnState = self.atn.states[returnStateNumber] + # all states must have single outgoing epsilon edge + if len(returnState.transitions) != 1 or not returnState.transitions[0].isEpsilon: + return False + + # Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget = returnState.transitions[0].target + if returnState.stateType == ATNState.BLOCK_END and returnStateTarget is p: + continue + + # Look for 'expr op expr' or case where expr's return state is block end + # of (...)* internal block; the block end points to loop back + # which points to p but we don't need to check that + if returnState is blockEndState: + continue + + # Look for ternary expr ? expr : expr. The return state points at block end, + # which points at loop entry state + if returnStateTarget is blockEndState: + continue + + # Look for complex prefix 'between expr and expr' case where 2nd expr's + # return state points at block end state of (...)* internal block + if returnStateTarget.stateType == ATNState.BLOCK_END \ + and len(returnStateTarget.transitions) == 1 \ + and returnStateTarget.transitions[0].isEpsilon \ + and returnStateTarget.transitions[0].target is p: + continue + + # anything else ain't conforming + return False + + return True + + + def getRuleName(self, index:int): + if self.parser is not None and index>=0: + return self.parser.ruleNames[index] + else: + return "" + + epsilonTargetMethods = dict() + epsilonTargetMethods[Transition.RULE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + sim.ruleTransition(config, t) + epsilonTargetMethods[Transition.PRECEDENCE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + sim.precedenceTransition(config, t, collectPredicates, inContext, fullCtx) + epsilonTargetMethods[Transition.PREDICATE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + sim.predTransition(config, t, collectPredicates, inContext, fullCtx) + epsilonTargetMethods[Transition.ACTION] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + sim.actionTransition(config, t) + epsilonTargetMethods[Transition.EPSILON] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + ATNConfig(state=t.target, config=config) + epsilonTargetMethods[Transition.ATOM] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None + epsilonTargetMethods[Transition.RANGE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None + epsilonTargetMethods[Transition.SET] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \ + ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None + + def getEpsilonTarget(self, config:ATNConfig, t:Transition, collectPredicates:bool, inContext:bool, fullCtx:bool, treatEofAsEpsilon:bool): + m = self.epsilonTargetMethods.get(t.serializationType, None) + if m is None: + return None + else: + return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon) + + def actionTransition(self, config:ATNConfig, t:ActionTransition): + if 
ParserATNSimulator.debug: + print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex)) + return ATNConfig(state=t.target, config=config) + + def precedenceTransition(self, config:ATNConfig, pt:PrecedencePredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool): + if ParserATNSimulator.debug: + print("PRED (collectPredicates=" + str(collectPredicates) + ") " + + str(pt.precedence) + ">=_p, ctx dependent=true") + if self.parser is not None: + print("context surrounding pred is " + str(self.parser.getRuleInvocationStack())) + + c = None + if collectPredicates and inContext: + if fullCtx: + # In full context mode, we can evaluate predicates on-the-fly + # during closure, which dramatically reduces the size of + # the config sets. It also obviates the need to test predicates + # later during conflict resolution. + currentPosition = self._input.index + self._input.seek(self._startIndex) + predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext) + self._input.seek(currentPosition) + if predSucceeds: + c = ATNConfig(state=pt.target, config=config) # no pred context + else: + newSemCtx = andContext(config.semanticContext, pt.getPredicate()) + c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config) + else: + c = ATNConfig(state=pt.target, config=config) + + if ParserATNSimulator.debug: + print("config from pred transition=" + str(c)) + return c + + def predTransition(self, config:ATNConfig, pt:PredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool): + if ParserATNSimulator.debug: + print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) + + ":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent)) + if self.parser is not None: + print("context surrounding pred is " + str(self.parser.getRuleInvocationStack())) + + c = None + if collectPredicates and (not pt.isCtxDependent or (pt.isCtxDependent and inContext)): + if fullCtx: + # In full context mode, we can evaluate predicates on-the-fly + # during closure, which dramatically reduces the size of + # the config sets. It also obviates the need to test predicates + # later during conflict resolution. + currentPosition = self._input.index + self._input.seek(self._startIndex) + predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext) + self._input.seek(currentPosition) + if predSucceeds: + c = ATNConfig(state=pt.target, config=config) # no pred context + else: + newSemCtx = andContext(config.semanticContext, pt.getPredicate()) + c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config) + else: + c = ATNConfig(state=pt.target, config=config) + + if ParserATNSimulator.debug: + print("config from pred transition=" + str(c)) + return c + + def ruleTransition(self, config:ATNConfig, t:RuleTransition): + if ParserATNSimulator.debug: + print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context)) + returnState = t.followState + newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber) + return ATNConfig(state=t.target, context=newContext, config=config ) + + def getConflictingAlts(self, configs:ATNConfigSet): + altsets = PredictionMode.getConflictingAltSubsets(configs) + return PredictionMode.getAlts(altsets) + + # Sam pointed out a problem with the previous definition, v3, of + # ambiguous states. If we have another state associated with conflicting + # alternatives, we should keep going. For example, the following grammar + # + # s : (ID | ID ID?) 
';' ; + # + # When the ATN simulation reaches the state before ';', it has a DFA + # state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally + # 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node + # because alternative to has another way to continue, via [6|2|[]]. + # The key is that we have a single state that has config's only associated + # with a single alternative, 2, and crucially the state transitions + # among the configurations are all non-epsilon transitions. That means + # we don't consider any conflicts that include alternative 2. So, we + # ignore the conflict between alts 1 and 2. We ignore a set of + # conflicting alts when there is an intersection with an alternative + # associated with a single alt state in the state→config-list map. + # + # It's also the case that we might have two conflicting configurations but + # also a 3rd nonconflicting configuration for a different alternative: + # [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: + # + # a : A | A | A B ; + # + # After matching input A, we reach the stop state for rule A, state 1. + # State 8 is the state right before B. Clearly alternatives 1 and 2 + # conflict and no amount of further lookahead will separate the two. + # However, alternative 3 will be able to continue and so we do not + # stop working on this state. In the previous example, we're concerned + # with states associated with the conflicting alternatives. Here alt + # 3 is not associated with the conflicting configs, but since we can continue + # looking for input reasonably, I don't declare the state done. We + # ignore a set of conflicting alts when we have an alternative + # that we still need to pursue. + # + + def getConflictingAltsOrUniqueAlt(self, configs:ATNConfigSet): + conflictingAlts = None + if configs.uniqueAlt!= ATN.INVALID_ALT_NUMBER: + conflictingAlts = set() + conflictingAlts.add(configs.uniqueAlt) + else: + conflictingAlts = configs.conflictingAlts + return conflictingAlts + + def getTokenName(self, t:int): + if t==Token.EOF: + return "EOF" + if self.parser is not None and \ + self.parser.literalNames is not None and \ + t < len(self.parser.literalNames): + return self.parser.literalNames[t] + "<" + str(t) + ">" + else: + return str(t) + + def getLookaheadName(self, input:TokenStream): + return self.getTokenName(input.LA(1)) + + # Used for debugging in adaptivePredict around execATN but I cut + # it out for clarity now that alg. works well. We can leave this + # "dead" code for a bit. + # + def dumpDeadEndConfigs(self, nvae:NoViableAltException): + print("dead end configs: ") + for c in nvae.getDeadEndConfigs(): + trans = "no edges" + if len(c.state.transitions)>0: + t = c.state.transitions[0] + if isinstance(t, AtomTransition): + trans = "Atom "+ self.getTokenName(t.label) + elif isinstance(t, SetTransition): + neg = isinstance(t, NotSetTransition) + trans = ("~" if neg else "")+"Set "+ str(t.set) + print(c.toString(self.parser, True) + ":" + trans, file=sys.stderr) + + def noViableAlt(self, input:TokenStream, outerContext:ParserRuleContext, configs:ATNConfigSet, startIndex:int): + return NoViableAltException(self.parser, input, input.get(startIndex), input.LT(1), configs, outerContext) + + def getUniqueAlt(self, configs:ATNConfigSet): + alt = ATN.INVALID_ALT_NUMBER + for c in configs: + if alt == ATN.INVALID_ALT_NUMBER: + alt = c.alt # found first alt + elif c.alt!=alt: + return ATN.INVALID_ALT_NUMBER + return alt + + # + # Add an edge to the DFA, if possible. 
This method calls + # {@link #addDFAState} to ensure the {@code to} state is present in the + # DFA. If {@code from} is {@code null}, or if {@code t} is outside the + # range of edges that can be represented in the DFA tables, this method + # returns without adding the edge to the DFA. + # + #

If {@code to} is {@code null}, this method returns {@code null}. + # Otherwise, this method returns the {@link DFAState} returned by calling + # {@link #addDFAState} for the {@code to} state.
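Both addDFAEdge below and getExistingTargetState above shift the symbol by one so that EOF (t = -1) lands in slot 0 of the edge table. A self-contained illustration, not part of this runtime file; the maxTokenType value is made up:

    MAX_TOKEN_TYPE = 5                     # hypothetical grammar-wide maximum
    edges = [None] * (MAX_TOKEN_TYPE + 2)  # slots for t = -1 .. MAX_TOKEN_TYPE

    def set_edge(t, target):
        edges[t + 1] = target              # EOF (-1) maps to index 0

    def get_edge(t):
        if t + 1 < 0 or t + 1 >= len(edges):
            return None                    # outside the representable range
        return edges[t + 1]

    set_edge(-1, "targetAtEOF")
    assert get_edge(-1) == "targetAtEOF" and get_edge(3) is None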

+ # + # @param dfa The DFA + # @param from The source state for the edge + # @param t The input symbol + # @param to The target state for the edge + # + # @return If {@code to} is {@code null}, this method returns {@code null}; + # otherwise this method returns the result of calling {@link #addDFAState} + # on {@code to} + # + def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState): + if ParserATNSimulator.debug: + print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t)) + + if to is None: + return None + + to = self.addDFAState(dfa, to) # used existing if possible not incoming + if from_ is None or t < -1 or t > self.atn.maxTokenType: + return to + + if from_.edges is None: + from_.edges = [None] * (self.atn.maxTokenType + 2) + from_.edges[t+1] = to # connect + + if ParserATNSimulator.debug: + names = None if self.parser is None else self.parser.literalNames + print("DFA=\n" + dfa.toString(names)) + + return to + + # + # Add state {@code D} to the DFA if it is not already present, and return + # the actual instance stored in the DFA. If a state equivalent to {@code D} + # is already in the DFA, the existing state is returned. Otherwise this + # method returns {@code D} after adding it to the DFA. + # + #

If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and + # does not change the DFA.
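The add-or-get idiom in addDFAState below, reduced to its core: dfa.states is a dict keyed by the DFAState itself, so an equivalent incoming state resolves to the already-cached instance. A minimal sketch with a plain dict, not part of this runtime file:

    def add_or_get(states, d):
        existing = states.get(d)
        if existing is not None:
            return existing        # an equivalent state is already cached
        d.stateNumber = len(states)
        states[d] = d              # d becomes the canonical instance
        return d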

+ # + # @param dfa The dfa + # @param D The DFA state to add + # @return The state stored in the DFA. This will be either the existing + # state if {@code D} is already in the DFA, or {@code D} itself if the + # state was not already present. + # + def addDFAState(self, dfa:DFA, D:DFAState): + if D is self.ERROR: + return D + + + existing = dfa.states.get(D, None) + if existing is not None: + return existing + + D.stateNumber = len(dfa.states) + if not D.configs.readonly: + D.configs.optimizeConfigs(self) + D.configs.setReadonly(True) + dfa.states[D] = D + if ParserATNSimulator.debug: + print("adding new DFA state: " + str(D)) + return D + + def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int): + if ParserATNSimulator.debug or ParserATNSimulator.retry_debug: + print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) + + ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex)) + if self.parser is not None: + self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + + def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int): + if ParserATNSimulator.debug or ParserATNSimulator.retry_debug: + print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) + + ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex)) + if self.parser is not None: + self.parser.getErrorListenerDispatch().reportContextSensitivity(self.parser, dfa, startIndex, stopIndex, prediction, configs) + + # If context sensitive parsing, we know it's ambiguity not conflict# + def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int, + exact:bool, ambigAlts:set, configs:ATNConfigSet ): + if ParserATNSimulator.debug or ParserATNSimulator.retry_debug: +# ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn); +# int i = 1; +# for (Transition t : dfa.atnStartState.transitions) { +# print("ALT "+i+"="); +# print(startIndex+".."+stopIndex+", len(input)="+parser.getInputStream().size()); +# TraceTree path = finder.trace(t.target, parser.getContext(), (TokenStream)parser.getInputStream(), +# startIndex, stopIndex); +# if ( path!=null ) { +# print("path = "+path.toStringTree()); +# for (TraceTree leaf : path.leaves) { +# List states = path.getPathToNode(leaf); +# print("states="+states); +# } +# } +# i++; +# } + print("reportAmbiguity " + str(ambigAlts) + ":" + str(configs) + + ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex)) + if self.parser is not None: + self.parser.getErrorListenerDispatch().reportAmbiguity(self.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/PredictionMode.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/PredictionMode.py new file mode 100644 index 0000000000000000000000000000000000000000..8e5c73bb47f329d519f4e574ba3a36fc6c4ac29f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/PredictionMode.py @@ -0,0 +1,499 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. 
+# +# +# This enumeration defines the prediction modes available in ANTLR 4 along with +# utility methods for analyzing configuration sets for conflicts and/or +# ambiguities. + + +from enum import Enum +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import ATNConfig +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.atn.ATNState import RuleStopState +from antlr4.atn.SemanticContext import SemanticContext + +PredictionMode = None + +class PredictionMode(Enum): + # + # The SLL(*) prediction mode. This prediction mode ignores the current + # parser context when making predictions. This is the fastest prediction + # mode, and provides correct results for many grammars. This prediction + # mode is more powerful than the prediction mode provided by ANTLR 3, but + # may result in syntax errors for grammar and input combinations which are + # not SLL. + # + #

+ # When using this prediction mode, the parser will either return a correct + # parse tree (i.e. the same parse tree that would be returned with the + # {@link #LL} prediction mode), or it will report a syntax error. If a + # syntax error is encountered when using the {@link #SLL} prediction mode, + # it may be due to either an actual syntax error in the input or indicate + # that the particular combination of grammar and input requires the more + # powerful {@link #LL} prediction abilities to complete successfully.
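This either-correct-or-error property is what enables the common two-stage strategy: parse with SLL and a bail-out error strategy, and rerun with full LL only on failure. A sketch, not part of this runtime file, assuming hypothetical generated classes MyLexer and MyParser with entry rule prog:

    from antlr4 import CommonTokenStream, InputStream
    from antlr4.atn.PredictionMode import PredictionMode
    from antlr4.error.ErrorStrategy import BailErrorStrategy, DefaultErrorStrategy
    from antlr4.error.Errors import ParseCancellationException

    def parse(text):
        parser = MyParser(CommonTokenStream(MyLexer(InputStream(text))))
        parser._interp.predictionMode = PredictionMode.SLL
        parser._errHandler = BailErrorStrategy()      # bail instead of recovering
        try:
            return parser.prog()                      # fast path: SLL
        except ParseCancellationException:
            parser.reset()                            # rewind the token stream
            parser._interp.predictionMode = PredictionMode.LL
            parser._errHandler = DefaultErrorStrategy()
            return parser.prog()                      # slow path: full LL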

+ # + #

+ # This prediction mode does not provide any guarantees for prediction + # behavior for syntactically-incorrect inputs.

+ # + SLL = 0 + # + # The LL(*) prediction mode. This prediction mode allows the current parser + # context to be used for resolving SLL conflicts that occur during + # prediction. This is the fastest prediction mode that guarantees correct + # parse results for all combinations of grammars with syntactically correct + # inputs. + # + #

+ # When using this prediction mode, the parser will make correct decisions + # for all syntactically-correct grammar and input combinations. However, in + # cases where the grammar is truly ambiguous this prediction mode might not + # report a precise answer for exactly which alternatives are + # ambiguous.

+ # + #

+ # This prediction mode does not provide any guarantees for prediction + # behavior for syntactically-incorrect inputs.

+ # + LL = 1 + # + # The LL(*) prediction mode with exact ambiguity detection. In addition to + # the correctness guarantees provided by the {@link #LL} prediction mode, + # this prediction mode instructs the prediction algorithm to determine the + # complete and exact set of ambiguous alternatives for every ambiguous + # decision encountered while parsing. + # + #

+ # This prediction mode may be used for diagnosing ambiguities during + # grammar development. Due to the performance overhead of calculating sets + # of ambiguous alternatives, this prediction mode should be avoided when + # the exact results are not necessary.

+ # + #

+ # This prediction mode does not provide any guarantees for prediction + # behavior for syntactically-incorrect inputs.

+ # + LL_EXACT_AMBIG_DETECTION = 2 + + + # + # Computes the SLL prediction termination condition. + # + #

+ # This method computes the SLL prediction termination condition for both of + # the following cases.

+ #
+ # 1. The usual SLL+LL fallback upon SLL conflict.
+ # 2. Pure SLL without LL fallback.
+ #

COMBINED SLL+LL PARSING

+ # + #

When LL-fallback is enabled upon SLL conflict, correct predictions are + # ensured regardless of how the termination condition is computed by this + # method. Due to the substantially higher cost of LL prediction, the + # prediction should only fall back to LL when the additional lookahead + # cannot lead to a unique SLL prediction.

+ # + #

Assuming combined SLL+LL parsing, an SLL configuration set with only
+ # conflicting subsets should fall back to full LL, even if the
+ # configuration sets don't resolve to the same alternative (e.g.
+ # {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
+ # configuration, SLL could continue with the hopes that more lookahead will
+ # resolve via one of those non-conflicting configurations.

+ # + #

Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+ # stops when it sees only conflicting configuration subsets. In contrast,
+ # full LL keeps going when there is uncertainty.

+ # + #

HEURISTIC

+ # + #

As a heuristic, we stop prediction when we see any conflicting subset + # unless we see a state that only has one alternative associated with it. + # The single-alt-state thing lets prediction continue upon rules like + # (otherwise, it would admit defeat too soon):

+ # + #

{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}

+ # + #

When the ATN simulation reaches the state before {@code ';'}, it has a
+ # DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+ # {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+ # processing this node because alternative two has another way to continue,
+ # via {@code [6|2|[]]}.

+ # + #

It also lets us continue for this rule:

+ # + #

{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}

+ # + #

After matching input A, we reach the stop state for rule A, state 1. + # State 8 is the state right before B. Clearly alternatives 1 and 2 + # conflict and no amount of further lookahead will separate the two. + # However, alternative 3 will be able to continue and so we do not stop + # working on this state. In the previous example, we're concerned with + # states associated with the conflicting alternatives. Here alt 3 is not + # associated with the conflicting configs, but since we can continue + # looking for input reasonably, don't declare the state done.
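The heuristic condenses to: stop only when some alternative subset conflicts and no ATN state is associated with a single alternative. A sketch, not part of this runtime file, mirroring hasConflictingAltSet and hasStateAssociatedWithOneAlt used below:

    def sll_should_stop(alt_subsets, configs):
        has_conflict = any(len(alts) > 1 for alts in alt_subsets)
        state_to_alts = {}
        for c in configs:
            state_to_alts.setdefault(c.state.stateNumber, set()).add(c.alt)
        # a state whose configs all share one alt lets prediction continue
        single_alt_state = any(len(a) == 1 for a in state_to_alts.values())
        return has_conflict and not single_alt_state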

+ # + #

PURE SLL PARSING

+ # + #

To handle pure SLL parsing, all we have to do is make sure that we + # combine stack contexts for configurations that differ only by semantic + # predicate. From there, we can do the usual SLL termination heuristic.

+ # + #

PREDICATES IN SLL+LL PARSING

+ # + #

SLL decisions don't evaluate predicates until after they reach DFA stop + # states because they need to create the DFA cache that works in all + # semantic situations. In contrast, full LL evaluates predicates collected + # during start state computation so it can ignore predicates thereafter. + # This means that SLL termination detection can totally ignore semantic + # predicates.

+ # + #

Implementation-wise, {@link ATNConfigSet} combines stack contexts but not + # semantic predicate contexts so we might see two configurations like the + # following.

+ # + #

{@code (s, 1, x, {}), (s, 1, x', {p})}

+ # + #

Before testing these configurations against others, we have to merge + # {@code x} and {@code x'} (without modifying the existing configurations). + # For example, we test {@code (x+x')==x''} when looking for conflicts in + # the following configurations.

+ # + #

{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

+ # + #

If the configuration set has predicates (as indicated by + # {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of + # the configurations to strip out all of the predicates so that a standard + # {@link ATNConfigSet} will merge everything ignoring predicates.

+ # + @classmethod + def hasSLLConflictTerminatingPrediction(cls, mode:PredictionMode, configs:ATNConfigSet): + # Configs in rule stop states indicate reaching the end of the decision + # rule (local context) or end of start rule (full context). If all + # configs meet this condition, then none of the configurations is able + # to match additional input so we terminate prediction. + # + if cls.allConfigsInRuleStopStates(configs): + return True + + # pure SLL mode parsing + if mode == PredictionMode.SLL: + # Don't bother with combining configs from different semantic + # contexts if we can fail over to full LL; costs more time + # since we'll often fail over anyway. + if configs.hasSemanticContext: + # dup configs, tossing out semantic predicates + dup = ATNConfigSet() + for c in configs: + c = ATNConfig(config=c, semantic=SemanticContext.NONE) + dup.add(c) + configs = dup + # now we have combined contexts for configs with dissimilar preds + + # pure SLL or combined SLL+LL mode parsing + altsets = cls.getConflictingAltSubsets(configs) + return cls.hasConflictingAltSet(altsets) and not cls.hasStateAssociatedWithOneAlt(configs) + + # Checks if any configuration in {@code configs} is in a + # {@link RuleStopState}. Configurations meeting this condition have reached + # the end of the decision rule (local context) or end of start rule (full + # context). + # + # @param configs the configuration set to test + # @return {@code true} if any configuration in {@code configs} is in a + # {@link RuleStopState}, otherwise {@code false} + @classmethod + def hasConfigInRuleStopState(cls, configs:ATNConfigSet): + return any(isinstance(cfg.state, RuleStopState) for cfg in configs) + + # Checks if all configurations in {@code configs} are in a + # {@link RuleStopState}. Configurations meeting this condition have reached + # the end of the decision rule (local context) or end of start rule (full + # context). + # + # @param configs the configuration set to test + # @return {@code true} if all configurations in {@code configs} are in a + # {@link RuleStopState}, otherwise {@code false} + @classmethod + def allConfigsInRuleStopStates(cls, configs:ATNConfigSet): + return all(isinstance(cfg.state, RuleStopState) for cfg in configs) + + # + # Full LL prediction termination. + # + #

Can we stop looking ahead during ATN simulation or is there some + # uncertainty as to which alternative we will ultimately pick, after + # consuming more input? Even if there are partial conflicts, we might know + # that everything is going to resolve to the same minimum alternative. That + # means we can stop since no more lookahead will change that fact. On the + # other hand, there might be multiple conflicts that resolve to different + # minimums. That means we need more look ahead to decide which of those + # alternatives we should predict.

+ # + #

The basic idea is to split the set of configurations {@code C}, into + # conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with + # non-conflicting configurations. Two configurations conflict if they have + # identical {@link ATNConfig#state} and {@link ATNConfig#context} values + # but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)} + # and {@code (s, j, ctx, _)} for {@code i!=j}.

+ # + #

Reduce these configuration subsets to the set of possible alternatives. + # You can compute the alternative subsets in one pass as follows:

+ # + #

{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in + # {@code C} holding {@code s} and {@code ctx} fixed.

+ # + #

Or in pseudo-code, for each configuration {@code c} in {@code C}:

+ # + #
+    # map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
+    # alt and not pred
+    # 
+ # + #

The values in {@code map} are the set of {@code A_s,ctx} sets.
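The pseudo-code above in plain Python: key on (state, context) so that alt and predicate are excluded from the map's hash/equals. A sketch, not part of this runtime file, of what getConflictingAltSubsets computes:

    def conflicting_alt_subsets(configs):
        alt_sets = {}  # (state, context) -> set of alts seen at that pair
        for c in configs:
            key = (c.state.stateNumber, c.context)  # ignores alt and pred
            alt_sets.setdefault(key, set()).add(c.alt)
        return list(alt_sets.values())              # the A_s,ctx sets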

+ # + #

If {@code |A_s,ctx|=1} then there is no conflict associated with + # {@code s} and {@code ctx}.

+ # + #

Reduce the subsets to singletons by choosing a minimum of each subset. If + # the union of these alternative subsets is a singleton, then no amount of + # more lookahead will help us. We will always pick that alternative. If, + # however, there is more than one alternative, then we are uncertain which + # alternative to predict and must continue looking for resolution. We may + # or may not discover an ambiguity in the future, even if there are no + # conflicting subsets this round.
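The reduction step as a sketch, not part of this runtime file: resolve each subset to its minimum and check whether a single alternative survives, which is what resolvesToJustOneViableAlt / getSingleViableAlt below report (0 standing in for ATN.INVALID_ALT_NUMBER):

    def single_viable_alt(alt_subsets, INVALID_ALT_NUMBER=0):
        viable = {min(alts) for alts in alt_subsets}
        return next(iter(viable)) if len(viable) == 1 else INVALID_ALT_NUMBER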

+ # + #

The biggest sin is to terminate early because it means we've made a + # decision but were uncertain as to the eventual outcome. We haven't used + # enough lookahead. On the other hand, announcing a conflict too late is no + # big deal; you will still have the conflict. It's just inefficient. It + # might even look until the end of file.

+ # + #

No special consideration for semantic predicates is required because + # predicates are evaluated on-the-fly for full LL prediction, ensuring that + # no configuration contains a semantic context during the termination + # check.

+ # + #

CONFLICTING CONFIGS

+ # + #

Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict + # when {@code i!=j} but {@code x=x'}. Because we merge all + # {@code (s, i, _)} configurations together, that means that there are at + # most {@code n} configurations associated with state {@code s} for + # {@code n} possible alternatives in the decision. The merged stacks + # complicate the comparison of configuration contexts {@code x} and + # {@code x'}. Sam checks to see if one is a subset of the other by calling + # merge and checking to see if the merged result is either {@code x} or + # {@code x'}. If the {@code x} associated with lowest alternative {@code i} + # is the superset, then {@code i} is the only possible prediction since the + # others resolve to {@code min(i)} as well. However, if {@code x} is + # associated with {@code j>i} then at least one stack configuration for + # {@code j} is not in conflict with alternative {@code i}. The algorithm + # should keep going, looking for more lookahead due to the uncertainty.

+ # + #

For simplicity, I'm doing an equality check between {@code x} and
+ # {@code x'} that lets the algorithm continue to consume lookahead longer
+ # than necessary. The reason I like the equality is of course the
+ # simplicity but also because that is the test you need to detect the
+ # alternatives that are actually in conflict.
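The conflict test described in this section, spelled out as a sketch that is not part of this runtime file; note the equality check on the merged context rather than a subset test:

    def conflicts(c1, c2):
        return (c1.state.stateNumber == c2.state.stateNumber
                and c1.alt != c2.alt
                and c1.context == c2.context)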

CONTINUE/STOP RULE

Continue if the union of resolved alternative sets from the non-conflicting and + # conflicting alternative subsets has more than one alternative. We are + # uncertain about which alternative to predict.

The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which + # alternatives are still in the running for the amount of input we've + # consumed at this point. The conflicting sets let us strip away + # configurations that won't lead to more states because we resolve + # conflicts to the configuration with a minimum alternative for the + # conflicting set.
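A minimal sketch of that resolution (mirroring getSingleViableAlt further down this file; 0 stands in for {@code ATN.INVALID_ALT_NUMBER}):

    def single_viable_alt(altsets):
        # resolve each subset to its minimum alternative; stop only if the
        # union of those minima is a singleton
        viable = {min(alts) for alts in altsets}
        return next(iter(viable)) if len(viable) == 1 else 0  # 0 = invalid alt

    print(single_viable_alt([{1, 2}, {1, 3}]))  # 1: every subset resolves to alt 1, stop
    print(single_viable_alt([{1, 2}, {2, 3}]))  # 0: still uncertain, keep consuming lookahead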

CASES

EXACT AMBIGUITY DETECTION

If all states report the same conflicting set of alternatives, then we + # know we have the exact ambiguity set.

{@code |A_i|>1} and + # {@code A_i = A_j} for all {@code i}, {@code j}.

In other words, we continue examining lookahead until all {@code A_i} + # have more than one alternative and all {@code A_i} are the same. If + # {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate + # because the resolved set is {@code {1}}. To determine what the real + # ambiguity is, we have to know whether the ambiguity is between one and + # two or one and three so we keep going. We can only stop prediction when + # we need exact ambiguity detection when the sets look like + # {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
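A minimal sketch of that stopping condition, using standalone versions of the allSubsetsConflict and allSubsetsEqual helpers defined below:

    def all_subsets_conflict(altsets):
        return all(len(alts) > 1 for alts in altsets)

    def all_subsets_equal(altsets):
        first = next(iter(altsets))
        return all(alts == first for alts in altsets)

    a = [{1, 2}, {1, 3}]
    print(all_subsets_conflict(a), all_subsets_equal(a))  # True False -> keep looking
    b = [{1, 2}, {1, 2}]
    print(all_subsets_conflict(b), all_subsets_equal(b))  # True True  -> exact ambiguity {1,2}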

+ # + @classmethod + def resolvesToJustOneViableAlt(cls, altsets:list): + return cls.getSingleViableAlt(altsets) + + # + # Determines if every alternative subset in {@code altsets} contains more + # than one alternative. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if every {@link BitSet} in {@code altsets} has + # {@link BitSet#cardinality cardinality} > 1, otherwise {@code false} + # + @classmethod + def allSubsetsConflict(cls, altsets:list): + return not cls.hasNonConflictingAltSet(altsets) + + # + # Determines if any single alternative subset in {@code altsets} contains + # exactly one alternative. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if {@code altsets} contains a {@link BitSet} with + # {@link BitSet#cardinality cardinality} 1, otherwise {@code false} + # + @classmethod + def hasNonConflictingAltSet(cls, altsets:list): + return any(len(alts) == 1 for alts in altsets) + + # + # Determines if any single alternative subset in {@code altsets} contains + # more than one alternative. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if {@code altsets} contains a {@link BitSet} with + # {@link BitSet#cardinality cardinality} > 1, otherwise {@code false} + # + @classmethod + def hasConflictingAltSet(cls, altsets:list): + return any(len(alts) > 1 for alts in altsets) + + # + # Determines if every alternative subset in {@code altsets} is equivalent. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if every member of {@code altsets} is equal to the + # others, otherwise {@code false} + # + @classmethod + def allSubsetsEqual(cls, altsets:list): + if not altsets: + return True + first = next(iter(altsets)) + return all(alts == first for alts in iter(altsets)) + + # + # Returns the unique alternative predicted by all alternative subsets in + # {@code altsets}. If no such alternative exists, this method returns + # {@link ATN#INVALID_ALT_NUMBER}. + # + # @param altsets a collection of alternative subsets + # + @classmethod + def getUniqueAlt(cls, altsets:list): + all = cls.getAlts(altsets) + if len(all)==1: + return next(iter(all)) + return ATN.INVALID_ALT_NUMBER + + # Gets the complete set of represented alternatives for a collection of + # alternative subsets. This method returns the union of each {@link BitSet} + # in {@code altsets}. + # + # @param altsets a collection of alternative subsets + # @return the set of represented alternatives in {@code altsets} + # + @classmethod + def getAlts(cls, altsets:list): + return set.union(*altsets) + + # + # This function gets the conflicting alt subsets from a configuration set. + # For each configuration {@code c} in {@code configs}: + # + #
+    # map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
+    # alt and not pred
+    # 
+ # + @classmethod + def getConflictingAltSubsets(cls, configs:ATNConfigSet): + configToAlts = dict() + for c in configs: + h = hash((c.state.stateNumber, c.context)) + alts = configToAlts.get(h, None) + if alts is None: + alts = set() + configToAlts[h] = alts + alts.add(c.alt) + return configToAlts.values() + + # + # Get a map from state to alt subset from a configuration set. For each + # configuration {@code c} in {@code configs}: + # + #
+    # map[c.{@link ATNConfig#state state}] U= c.{@link ATNConfig#alt alt}
+    # 
+ # + @classmethod + def getStateToAltMap(cls, configs:ATNConfigSet): + m = dict() + for c in configs: + alts = m.get(c.state, None) + if alts is None: + alts = set() + m[c.state] = alts + alts.add(c.alt) + return m + + @classmethod + def hasStateAssociatedWithOneAlt(cls, configs:ATNConfigSet): + return any(len(alts) == 1 for alts in cls.getStateToAltMap(configs).values()) + + @classmethod + def getSingleViableAlt(cls, altsets:list): + viableAlts = set() + for alts in altsets: + minAlt = min(alts) + viableAlts.add(minAlt) + if len(viableAlts)>1 : # more than 1 viable alt + return ATN.INVALID_ALT_NUMBER + return min(viableAlts) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py new file mode 100644 index 0000000000000000000000000000000000000000..8f4dc31088d35b73304432c46c54e31c1ab92700 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py @@ -0,0 +1,323 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# A tree structure used to record the semantic context in which +# an ATN configuration is valid. It's either a single predicate, +# a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +# +#

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of +# {@link SemanticContext} within this outer class.
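A hedged usage sketch of the combinators defined in this file (assuming the module is importable as antlr4.atn.SemanticContext, as in the stock runtime; operand order in the printed forms is unspecified because operands are kept in a set):

    from antlr4.atn.SemanticContext import Predicate, SemanticContext, andContext, orContext

    p1 = Predicate(ruleIndex=0, predIndex=0)
    p2 = Predicate(ruleIndex=0, predIndex=1)
    print(str(andContext(p1, p2)))  # e.g. {0:0}?&&{0:1}?
    print(str(orContext(p1, p2)))   # e.g. {0:0}?||{0:1}?
    print(andContext(p1, SemanticContext.NONE) is p1)  # True: NONE is the AND identity
    print(orContext(p1, SemanticContext.NONE) is SemanticContext.NONE)  # True: NONE dominates OR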

+# +from antlr4.Recognizer import Recognizer +from antlr4.RuleContext import RuleContext +from io import StringIO + + +class SemanticContext(object): + # + # The default {@link SemanticContext}, which is semantically equivalent to + # a predicate of the form {@code {true}?}. + # + NONE = None + + # + # For context independent predicates, we evaluate them without a local + # context (i.e., null context). That way, we can evaluate them without + # having to create proper rule-specific context during prediction (as + # opposed to the parser, which creates them naturally). In a practical + # sense, this avoids a cast exception from RuleContext to myruleContext. + # + #

For context dependent predicates, we must pass in a local context so that + # references such as $arg evaluate properly as _localctx.arg. We only + # capture context dependent predicates in the context in which we begin + # prediction, so we pass in the outer context here in case of context + # dependent predicate evaluation.

+ # + def eval(self, parser:Recognizer , outerContext:RuleContext ): + pass + + # + # Evaluate the precedence predicates for the context and reduce the result. + # + # @param parser The parser instance. + # @param outerContext The current parser context object. + # @return The simplified semantic context after precedence predicates are + # evaluated, which will be one of the following values. + # + # + def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext): + return self + +# need forward declaration +AND = None + +def andContext(a:SemanticContext, b:SemanticContext): + if a is None or a is SemanticContext.NONE: + return b + if b is None or b is SemanticContext.NONE: + return a + result = AND(a, b) + if len(result.opnds) == 1: + return result.opnds[0] + else: + return result + +# need forward declaration +OR = None + +def orContext(a:SemanticContext, b:SemanticContext): + if a is None: + return b + if b is None: + return a + if a is SemanticContext.NONE or b is SemanticContext.NONE: + return SemanticContext.NONE + result = OR(a, b) + if len(result.opnds) == 1: + return result.opnds[0] + else: + return result + +def filterPrecedencePredicates(collection:set): + return [context for context in collection if isinstance(context, PrecedencePredicate)] + + +class Predicate(SemanticContext): + __slots__ = ('ruleIndex', 'predIndex', 'isCtxDependent') + + def __init__(self, ruleIndex:int=-1, predIndex:int=-1, isCtxDependent:bool=False): + self.ruleIndex = ruleIndex + self.predIndex = predIndex + self.isCtxDependent = isCtxDependent # e.g., $i ref in pred + + def eval(self, parser:Recognizer , outerContext:RuleContext ): + localctx = outerContext if self.isCtxDependent else None + return parser.sempred(localctx, self.ruleIndex, self.predIndex) + + def __hash__(self): + return hash((self.ruleIndex, self.predIndex, self.isCtxDependent)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, Predicate): + return False + return self.ruleIndex == other.ruleIndex and \ + self.predIndex == other.predIndex and \ + self.isCtxDependent == other.isCtxDependent + + def __str__(self): + return "{" + str(self.ruleIndex) + ":" + str(self.predIndex) + "}?" + + +class PrecedencePredicate(SemanticContext): + + def __init__(self, precedence:int=0): + self.precedence = precedence + + def eval(self, parser:Recognizer , outerContext:RuleContext ): + return parser.precpred(outerContext, self.precedence) + + def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext): + if parser.precpred(outerContext, self.precedence): + return SemanticContext.NONE + else: + return None + + def __lt__(self, other): + return self.precedence < other.precedence + + def __hash__(self): + return 31 + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, PrecedencePredicate): + return False + else: + return self.precedence == other.precedence + +# A semantic context which is true whenever none of the contained contexts +# is false. 
+del AND +class AND(SemanticContext): + __slots__ = 'opnds' + + def __init__(self, a:SemanticContext, b:SemanticContext): + operands = set() + if isinstance( a, AND ): + operands.update(a.opnds) + else: + operands.add(a) + if isinstance( b, AND ): + operands.update(b.opnds) + else: + operands.add(b) + + precedencePredicates = filterPrecedencePredicates(operands) + if len(precedencePredicates)>0: + # interested in the transition with the lowest precedence + reduced = min(precedencePredicates) + operands.add(reduced) + + self.opnds = list(operands) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, AND): + return False + else: + return self.opnds == other.opnds + + def __hash__(self): + h = 0 + for o in self.opnds: + h = hash((h, o)) + return hash((h, "AND")) + + # + # {@inheritDoc} + # + #

+ # The evaluation of predicates by this context is short-circuiting, but + # unordered.

+ # + def eval(self, parser:Recognizer, outerContext:RuleContext): + return all(opnd.eval(parser, outerContext) for opnd in self.opnds) + + def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext): + differs = False + operands = [] + for context in self.opnds: + evaluated = context.evalPrecedence(parser, outerContext) + differs |= evaluated is not context + if evaluated is None: + # The AND context is false if any element is false + return None + elif evaluated is not SemanticContext.NONE: + # Reduce the result by skipping true elements + operands.append(evaluated) + + if not differs: + return self + + if len(operands)==0: + # all elements were true, so the AND context is true + return SemanticContext.NONE + + result = None + for o in operands: + result = o if result is None else andContext(result, o) + + return result + + def __str__(self): + with StringIO() as buf: + first = True + for o in self.opnds: + if not first: + buf.write("&&") + buf.write(str(o)) + first = False + return buf.getvalue() + +# +# A semantic context which is true whenever at least one of the contained +# contexts is true. +del OR +class OR (SemanticContext): + __slots__ = 'opnds' + + def __init__(self, a:SemanticContext, b:SemanticContext): + operands = set() + if isinstance( a, OR ): + operands.update(a.opnds) + else: + operands.add(a) + if isinstance( b, OR ): + operands.update(b.opnds) + else: + operands.add(b) + + precedencePredicates = filterPrecedencePredicates(operands) + if len(precedencePredicates)>0: + # interested in the transition with the highest precedence + s = sorted(precedencePredicates) + reduced = s[-1] + operands.add(reduced) + + self.opnds = list(operands) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, OR): + return False + else: + return self.opnds == other.opnds + + def __hash__(self): + h = 0 + for o in self.opnds: + h = hash((h, o)) + return hash((h, "OR")) + + #

+ # The evaluation of predicates by this context is short-circuiting, but + # unordered.

+ # + def eval(self, parser:Recognizer, outerContext:RuleContext): + return any(opnd.eval(parser, outerContext) for opnd in self.opnds) + + def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext): + differs = False + operands = [] + for context in self.opnds: + evaluated = context.evalPrecedence(parser, outerContext) + differs |= evaluated is not context + if evaluated is SemanticContext.NONE: + # The OR context is true if any element is true + return SemanticContext.NONE + elif evaluated is not None: + # Reduce the result by skipping false elements + operands.append(evaluated) + + if not differs: + return self + + if len(operands)==0: + # all elements were false, so the OR context is false + return None + + result = None + for o in operands: + result = o if result is None else orContext(result, o) + + return result + + def __str__(self): + with StringIO() as buf: + first = True + for o in self.opnds: + if not first: + buf.write("||") + buf.write(str(o)) + first = False + return buf.getvalue() + + +SemanticContext.NONE = Predicate() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py new file mode 100644 index 0000000000000000000000000000000000000000..2e4c9971763c34dbb2690660434c5c99d44193e1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py @@ -0,0 +1,268 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# An ATN transition between any two ATN states. Subclasses define +# atom, set, epsilon, action, predicate, rule transitions. +# +#

This is a one-way link. It emanates from a state (usually via a list of +# transitions) and has a target state.

Since we never have to change the ATN transitions once we construct the ATN, +# we can fix these transitions as specific classes. The DFA transitions, +# on the other hand, need to update their labels as the DFA adds transitions to +# its states. We'll use the term Edge for the DFA to distinguish them from +# ATN transitions.
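For instance, a small sketch against the transition classes defined below (BasicState from antlr4.atn.ATNState serves as a dummy target state):

    from antlr4.atn.ATNState import BasicState
    from antlr4.atn.Transition import AtomTransition, RangeTransition

    target = BasicState()
    atom = AtomTransition(target, ord('a'))
    print(atom.matches(ord('a'), 0, 0xFFFF))    # True: exact symbol match
    digits = RangeTransition(target, ord('0'), ord('9'))
    print(digits.matches(ord('5'), 0, 0xFFFF))  # True: range is inclusive on both ends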

+# +from antlr4.IntervalSet import IntervalSet +from antlr4.Token import Token + +# need forward declarations +from antlr4.atn.SemanticContext import Predicate, PrecedencePredicate + +ATNState = None +RuleStartState = None + +class Transition (object): + __slots__ = ('target','isEpsilon','label') + + # constants for serialization + EPSILON = 1 + RANGE = 2 + RULE = 3 + PREDICATE = 4 # e.g., {isType(input.LT(1))}? + ATOM = 5 + ACTION = 6 + SET = 7 # ~(A|B) or ~atom, wildcard, which convert to next 2 + NOT_SET = 8 + WILDCARD = 9 + PRECEDENCE = 10 + + serializationNames = [ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE" + ] + + serializationTypes = dict() + + def __init__(self, target:ATNState): + # The target of this transition. + if target is None: + raise Exception("target cannot be null.") + self.target = target + # Are we epsilon, action, sempred? + self.isEpsilon = False + self.label = None + + +# TODO: make all transitions sets? no, should remove set edges +class AtomTransition(Transition): + __slots__ = ('label_', 'serializationType') + + def __init__(self, target:ATNState, label:int): + super().__init__(target) + self.label_ = label # The token type or character value; or, signifies special label. + self.label = self.makeLabel() + self.serializationType = self.ATOM + + def makeLabel(self): + s = IntervalSet() + s.addOne(self.label_) + return s + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return self.label_ == symbol + + def __str__(self): + return str(self.label_) + +class RuleTransition(Transition): + __slots__ = ('ruleIndex', 'precedence', 'followState', 'serializationType') + + def __init__(self, ruleStart:RuleStartState, ruleIndex:int, precedence:int, followState:ATNState): + super().__init__(ruleStart) + self.ruleIndex = ruleIndex # ptr to the rule definition object for this rule ref + self.precedence = precedence + self.followState = followState # what node to begin computations following ref to rule + self.serializationType = self.RULE + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + +class EpsilonTransition(Transition): + __slots__ = ('serializationType', 'outermostPrecedenceReturn') + + def __init__(self, target, outermostPrecedenceReturn=-1): + super(EpsilonTransition, self).__init__(target) + self.serializationType = self.EPSILON + self.isEpsilon = True + self.outermostPrecedenceReturn = outermostPrecedenceReturn + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + def __str__(self): + return "epsilon" + +class RangeTransition(Transition): + __slots__ = ('serializationType', 'start', 'stop') + + def __init__(self, target:ATNState, start:int, stop:int): + super().__init__(target) + self.serializationType = self.RANGE + self.start = start + self.stop = stop + self.label = self.makeLabel() + + def makeLabel(self): + s = IntervalSet() + s.addRange(range(self.start, self.stop + 1)) + return s + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol >= self.start and symbol <= self.stop + + def __str__(self): + return "'" + chr(self.start) + "'..'" + chr(self.stop) + "'" + +class AbstractPredicateTransition(Transition): + + def __init__(self, target:ATNState): + super().__init__(target) + + +class PredicateTransition(AbstractPredicateTransition): + __slots__ = ('serializationType', 'ruleIndex', 'predIndex', 
'isCtxDependent') + + def __init__(self, target:ATNState, ruleIndex:int, predIndex:int, isCtxDependent:bool): + super().__init__(target) + self.serializationType = self.PREDICATE + self.ruleIndex = ruleIndex + self.predIndex = predIndex + self.isCtxDependent = isCtxDependent # e.g., $i ref in pred + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + def getPredicate(self): + return Predicate(self.ruleIndex, self.predIndex, self.isCtxDependent) + + def __str__(self): + return "pred_" + str(self.ruleIndex) + ":" + str(self.predIndex) + +class ActionTransition(Transition): + __slots__ = ('serializationType', 'ruleIndex', 'actionIndex', 'isCtxDependent') + + def __init__(self, target:ATNState, ruleIndex:int, actionIndex:int=-1, isCtxDependent:bool=False): + super().__init__(target) + self.serializationType = self.ACTION + self.ruleIndex = ruleIndex + self.actionIndex = actionIndex + self.isCtxDependent = isCtxDependent # e.g., $i ref in pred + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + def __str__(self): + return "action_"+self.ruleIndex+":"+self.actionIndex + +# A transition containing a set of values. +class SetTransition(Transition): + __slots__ = 'serializationType' + + def __init__(self, target:ATNState, set:IntervalSet): + super().__init__(target) + self.serializationType = self.SET + if set is not None: + self.label = set + else: + self.label = IntervalSet() + self.label.addRange(range(Token.INVALID_TYPE, Token.INVALID_TYPE + 1)) + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol in self.label + + def __str__(self): + return str(self.label) + +class NotSetTransition(SetTransition): + + def __init__(self, target:ATNState, set:IntervalSet): + super().__init__(target, set) + self.serializationType = self.NOT_SET + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol >= minVocabSymbol \ + and symbol <= maxVocabSymbol \ + and not super(type(self), self).matches(symbol, minVocabSymbol, maxVocabSymbol) + + def __str__(self): + return '~' + super(type(self), self).__str__() + + +class WildcardTransition(Transition): + __slots__ = 'serializationType' + + def __init__(self, target:ATNState): + super().__init__(target) + self.serializationType = self.WILDCARD + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol >= minVocabSymbol and symbol <= maxVocabSymbol + + def __str__(self): + return "." 
+ + +class PrecedencePredicateTransition(AbstractPredicateTransition): + __slots__ = ('serializationType', 'precedence') + + def __init__(self, target:ATNState, precedence:int): + super().__init__(target) + self.serializationType = self.PRECEDENCE + self.precedence = precedence + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + + def getPredicate(self): + return PrecedencePredicate(self.precedence) + + def __str__(self): + return self.precedence + " >= _p" + + +Transition.serializationTypes = { + EpsilonTransition: Transition.EPSILON, + RangeTransition: Transition.RANGE, + RuleTransition: Transition.RULE, + PredicateTransition: Transition.PREDICATE, + AtomTransition: Transition.ATOM, + ActionTransition: Transition.ACTION, + SetTransition: Transition.SET, + NotSetTransition: Transition.NOT_SET, + WildcardTransition: Transition.WILDCARD, + PrecedencePredicateTransition: Transition.PRECEDENCE + } + +del ATNState +del RuleStartState + +from antlr4.atn.ATNState import * diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..216c000dc5ffc8e53cc9c596e420c1e67604d1aa --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py @@ -0,0 +1 @@ +__author__ = 'ericvergnaud' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py new file mode 100644 index 0000000000000000000000000000000000000000..d80589a6834a48cc6bb718bf33f57156e32ec934 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py @@ -0,0 +1,133 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +from antlr4.atn.ATNState import StarLoopEntryState + +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.atn.ATNState import DecisionState +from antlr4.dfa.DFAState import DFAState +from antlr4.error.Errors import IllegalStateException + + +class DFA(object): + __slots__ = ('atnStartState', 'decision', '_states', 's0', 'precedenceDfa') + + def __init__(self, atnStartState:DecisionState, decision:int=0): + # From which ATN state did we create this DFA? + self.atnStartState = atnStartState + self.decision = decision + # A set of all DFA states. Use {@link Map} so we can get old state back + # ({@link Set} only allows you to see if it's there). + self._states = dict() + self.s0 = None + # {@code true} if this DFA is for a precedence decision; otherwise, + # {@code false}. This is the backing field for {@link #isPrecedenceDfa}, + # {@link #setPrecedenceDfa}. + self.precedenceDfa = False + + if isinstance(atnStartState, StarLoopEntryState): + if atnStartState.isPrecedenceDecision: + self.precedenceDfa = True + precedenceState = DFAState(configs=ATNConfigSet()) + precedenceState.edges = [] + precedenceState.isAcceptState = False + precedenceState.requiresFullContext = False + self.s0 = precedenceState + + + # Get the start state for a specific precedence value. + # + # @param precedence The current precedence. 
+ # @return The start state corresponding to the specified precedence, or + # {@code null} if no start state exists for the specified precedence. + # + # @throws IllegalStateException if this is not a precedence DFA. + # @see #isPrecedenceDfa() + + def getPrecedenceStartState(self, precedence:int): + if not self.precedenceDfa: + raise IllegalStateException("Only precedence DFAs may contain a precedence start state.") + + # s0.edges is never null for a precedence DFA + if precedence < 0 or precedence >= len(self.s0.edges): + return None + return self.s0.edges[precedence] + + # Set the start state for a specific precedence value. + # + # @param precedence The current precedence. + # @param startState The start state corresponding to the specified + # precedence. + # + # @throws IllegalStateException if this is not a precedence DFA. + # @see #isPrecedenceDfa() + # + def setPrecedenceStartState(self, precedence:int, startState:DFAState): + if not self.precedenceDfa: + raise IllegalStateException("Only precedence DFAs may contain a precedence start state.") + + if precedence < 0: + return + + # synchronization on s0 here is ok. when the DFA is turned into a + # precedence DFA, s0 will be initialized once and not updated again + # s0.edges is never null for a precedence DFA + if precedence >= len(self.s0.edges): + ext = [None] * (precedence + 1 - len(self.s0.edges)) + self.s0.edges.extend(ext) + self.s0.edges[precedence] = startState + # + # Sets whether this is a precedence DFA. If the specified value differs + # from the current DFA configuration, the following actions are taken; + # otherwise no changes are made to the current DFA. + # + # + # + # @param precedenceDfa {@code true} if this is a precedence DFA; otherwise, + # {@code false} + + def setPrecedenceDfa(self, precedenceDfa:bool): + if self.precedenceDfa != precedenceDfa: + self._states = dict() + if precedenceDfa: + precedenceState = DFAState(configs=ATNConfigSet()) + precedenceState.edges = [] + precedenceState.isAcceptState = False + precedenceState.requiresFullContext = False + self.s0 = precedenceState + else: + self.s0 = None + self.precedenceDfa = precedenceDfa + + @property + def states(self): + return self._states + + # Return a list of all states in this DFA, ordered by state number. + def sortedStates(self): + return sorted(self._states.keys(), key=lambda state: state.stateNumber) + + def __str__(self): + return self.toString(None) + + def toString(self, literalNames:list=None, symbolicNames:list=None): + if self.s0 is None: + return "" + from antlr4.dfa.DFASerializer import DFASerializer + serializer = DFASerializer(self,literalNames,symbolicNames) + return str(serializer) + + def toLexerString(self): + if self.s0 is None: + return "" + from antlr4.dfa.DFASerializer import LexerDFASerializer + serializer = LexerDFASerializer(self) + return str(serializer) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFASerializer.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFASerializer.py new file mode 100644 index 0000000000000000000000000000000000000000..bca0727b76dc54909be0bf60b6d636ec8f539927 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFASerializer.py @@ -0,0 +1,73 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. 
+#/ + +# A DFA walker that knows how to dump them to serialized strings.#/ +from io import StringIO +from antlr4 import DFA +from antlr4.Utils import str_list +from antlr4.dfa.DFAState import DFAState + + +class DFASerializer(object): + __slots__ = ('dfa', 'literalNames', 'symbolicNames') + + def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None): + self.dfa = dfa + self.literalNames = literalNames + self.symbolicNames = symbolicNames + + def __str__(self): + if self.dfa.s0 is None: + return None + with StringIO() as buf: + for s in self.dfa.sortedStates(): + n = 0 + if s.edges is not None: + n = len(s.edges) + for i in range(0, n): + t = s.edges[i] + if t is not None and t.stateNumber != 0x7FFFFFFF: + buf.write(self.getStateString(s)) + label = self.getEdgeLabel(i) + buf.write("-") + buf.write(label) + buf.write("->") + buf.write(self.getStateString(t)) + buf.write('\n') + output = buf.getvalue() + if len(output)==0: + return None + else: + return output + + def getEdgeLabel(self, i:int): + if i==0: + return "EOF" + if self.literalNames is not None and i<=len(self.literalNames): + return self.literalNames[i-1] + elif self.symbolicNames is not None and i<=len(self.symbolicNames): + return self.symbolicNames[i-1] + else: + return str(i-1) + + def getStateString(self, s:DFAState): + n = s.stateNumber + baseStateStr = ( ":" if s.isAcceptState else "") + "s" + str(n) + ( "^" if s.requiresFullContext else "") + if s.isAcceptState: + if s.predicates is not None: + return baseStateStr + "=>" + str_list(s.predicates) + else: + return baseStateStr + "=>" + str(s.prediction) + else: + return baseStateStr + +class LexerDFASerializer(DFASerializer): + + def __init__(self, dfa:DFA): + super().__init__(dfa, None) + + def getEdgeLabel(self, i:int): + return "'" + chr(i) + "'" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFAState.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFAState.py new file mode 100644 index 0000000000000000000000000000000000000000..51955a448886ea1fa34f0f4dff7fb0976edd1975 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFAState.py @@ -0,0 +1,126 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + +# Map a predicate to a predicted alternative.#/ +from io import StringIO +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.atn.SemanticContext import SemanticContext + + +class PredPrediction(object): + __slots__ = ('alt', 'pred') + + def __init__(self, pred:SemanticContext, alt:int): + self.alt = alt + self.pred = pred + + def __str__(self): + return "(" + str(self.pred) + ", " + str(self.alt) + ")" + +# A DFA state represents a set of possible ATN configurations. +# As Aho, Sethi, Ullman p. 117 says "The DFA uses its state +# to keep track of all possible states the ATN can be in after +# reading each input symbol. That is to say, after reading +# input a1a2..an, the DFA is in a state that represents the +# subset T of the states of the ATN that are reachable from the +# ATN's start state along some path labeled a1a2..an." +# In conventional NFA→DFA conversion, therefore, the subset T +# would be a bitset representing the set of states the +# ATN could be in. We need to track the alt predicted by each +# state as well, however. 
More importantly, we need to maintain +# a stack of states, tracking the closure operations as they +# jump from rule to rule, emulating rule invocations (method calls). +# I have to add a stack to simulate the proper lookahead sequences for +# the underlying LL grammar from which the ATN was derived. +# +#

I use a set of ATNConfig objects, not simple states. An ATNConfig +# is both a state (à la normal conversion) and a RuleContext describing +# the chain of rules (if any) followed to arrive at that state.

A DFA state may have multiple references to a particular state, +# but with different ATN contexts (with the same or different alts), +# meaning that the state was reached via a different set of rule invocations.

+#/ +class DFAState(object): + __slots__ = ( + 'stateNumber', 'configs', 'edges', 'isAcceptState', 'prediction', + 'lexerActionExecutor', 'requiresFullContext', 'predicates' + ) + + def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=ATNConfigSet()): + self.stateNumber = stateNumber + self.configs = configs + # {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) + # {@link Token#EOF} maps to {@code edges[0]}. + self.edges = None + self.isAcceptState = False + # if accept state, what ttype do we match or alt do we predict? + # This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or + # {@link #requiresFullContext}. + self.prediction = 0 + self.lexerActionExecutor = None + # Indicates that this state was created during SLL prediction that + # discovered a conflict between the configurations in the state. Future + # {@link ParserATNSimulator#execATN} invocations immediately jumped doing + # full context prediction if this field is true. + self.requiresFullContext = False + # During SLL parsing, this is a list of predicates associated with the + # ATN configurations of the DFA state. When we have predicates, + # {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates + # on-the-fly. If this is not null, then {@link #prediction} is + # {@link ATN#INVALID_ALT_NUMBER}. + # + #

We only use these for non-{@link #requiresFullContext} but conflicting states. That + # means we know from the context (it's $ or we don't dip into outer + # context) that it's an ambiguity, not a conflict.

This list is computed by {@link ParserATNSimulator#predicateDFAState}.

+ self.predicates = None + + + + # Get the set of all alts mentioned by all ATN configurations in this + # DFA state. + def getAltSet(self): + if self.configs is not None: + return set(cfg.alt for cfg in self.configs) or None + return None + + def __hash__(self): + return hash(self.configs) + + # Two {@link DFAState} instances are equal if their ATN configuration sets + # are the same. This method is used to see if a state already exists. + # + #

Because the number of alternatives and number of ATN configurations are + # finite, there is a finite number of DFA states that can be processed. + # This is necessary to show that the algorithm terminates.

Cannot test the DFA state numbers here because in + # {@link ParserATNSimulator#addDFAState} we need to know if any other state + # exists that has this exact set of ATN configurations. The + # {@link #stateNumber} is irrelevant.
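A small sketch of that identity rule (two states sharing a configuration set compare equal no matter how they are numbered):

    from antlr4.atn.ATNConfigSet import ATNConfigSet
    from antlr4.dfa.DFAState import DFAState

    configs = ATNConfigSet()
    a = DFAState(stateNumber=1, configs=configs)
    b = DFAState(stateNumber=99, configs=configs)
    print(a == b)  # True: only the ATN configuration sets are compared
    print(a.stateNumber == b.stateNumber)  # False: the numbering is irrelevant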

+ def __eq__(self, other): + # compare set of ATN configurations in this set with other + if self is other: + return True + elif not isinstance(other, DFAState): + return False + else: + return self.configs==other.configs + + def __str__(self): + with StringIO() as buf: + buf.write(str(self.stateNumber)) + buf.write(":") + buf.write(str(self.configs)) + if self.isAcceptState: + buf.write("=>") + if self.predicates is not None: + buf.write(str(self.predicates)) + else: + buf.write(str(self.prediction)) + return buf.getvalue() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..216c000dc5ffc8e53cc9c596e420c1e67604d1aa --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py @@ -0,0 +1 @@ +__author__ = 'ericvergnaud' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py new file mode 100644 index 0000000000000000000000000000000000000000..32ac14b63579ce7c984c2e34f2b1c80bebe328ed --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py @@ -0,0 +1,107 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + + +# +# This implementation of {@link ANTLRErrorListener} can be used to identify +# certain potential correctness and performance problems in grammars. "Reports" +# are made by calling {@link Parser#notifyErrorListeners} with the appropriate +# message. +# +# + +from io import StringIO +from antlr4 import Parser, DFA +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.error.ErrorListener import ErrorListener + +class DiagnosticErrorListener(ErrorListener): + + def __init__(self, exactOnly:bool=True): + # whether all ambiguities or only exact ambiguities are reported. 
+ self.exactOnly = exactOnly + + def reportAmbiguity(self, recognizer:Parser, dfa:DFA, startIndex:int, + stopIndex:int, exact:bool, ambigAlts:set, configs:ATNConfigSet): + if self.exactOnly and not exact: + return + + with StringIO() as buf: + buf.write("reportAmbiguity d=") + buf.write(self.getDecisionDescription(recognizer, dfa)) + buf.write(": ambigAlts=") + buf.write(str(self.getConflictingAlts(ambigAlts, configs))) + buf.write(", input='") + buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex)) + buf.write("'") + recognizer.notifyErrorListeners(buf.getvalue()) + + + def reportAttemptingFullContext(self, recognizer:Parser, dfa:DFA, startIndex:int, + stopIndex:int, conflictingAlts:set, configs:ATNConfigSet): + with StringIO() as buf: + buf.write("reportAttemptingFullContext d=") + buf.write(self.getDecisionDescription(recognizer, dfa)) + buf.write(", input='") + buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex)) + buf.write("'") + recognizer.notifyErrorListeners(buf.getvalue()) + + def reportContextSensitivity(self, recognizer:Parser, dfa:DFA, startIndex:int, + stopIndex:int, prediction:int, configs:ATNConfigSet): + with StringIO() as buf: + buf.write("reportContextSensitivity d=") + buf.write(self.getDecisionDescription(recognizer, dfa)) + buf.write(", input='") + buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex)) + buf.write("'") + recognizer.notifyErrorListeners(buf.getvalue()) + + def getDecisionDescription(self, recognizer:Parser, dfa:DFA): + decision = dfa.decision + ruleIndex = dfa.atnStartState.ruleIndex + + ruleNames = recognizer.ruleNames + if ruleIndex < 0 or ruleIndex >= len(ruleNames): + return str(decision) + + ruleName = ruleNames[ruleIndex] + if ruleName is None or len(ruleName)==0: + return str(decision) + + return str(decision) + " (" + ruleName + ")" + + # + # Computes the set of conflicting or ambiguous alternatives from a + # configuration set, if that information was not already provided by the + # parser. + # + # @param reportedAlts The set of conflicting or ambiguous alternatives, as + # reported by the parser. + # @param configs The conflicting or ambiguous configuration set. + # @return Returns {@code reportedAlts} if it is not {@code null}, otherwise + # returns the set of alternatives represented in {@code configs}. + # + def getConflictingAlts(self, reportedAlts:set, configs:ATNConfigSet): + if reportedAlts is not None: + return reportedAlts + + result = set() + for config in configs: + result.add(config.alt) + + return result diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorListener.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorListener.py new file mode 100644 index 0000000000000000000000000000000000000000..933264d431b9829f43a38d5f0f07c83bbad703a0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorListener.py @@ -0,0 +1,72 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. + +# Provides an empty default implementation of {@link ANTLRErrorListener}. The +# default implementation of each method does nothing, but can be overridden as +# necessary. 
+ + +import sys + +class ErrorListener(object): + + def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): + pass + + def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs): + pass + + def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs): + pass + + def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs): + pass + +class ConsoleErrorListener(ErrorListener): + # + # Provides a default instance of {@link ConsoleErrorListener}. + # + INSTANCE = None + + # + # {@inheritDoc} + # + #

+ # This implementation prints messages to standard error ({@code sys.stderr}) containing the + # values of {@code line}, {@code charPositionInLine}, and {@code msg} using + # the following format.

+    # line line:charPositionInLine msg
+    # 
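For example, wiring the listener into a parser might look like the following sketch (MyLexer and MyParser are hypothetical stand-ins for a generated lexer/parser pair, and expr is an assumed start rule):

    from antlr4 import CommonTokenStream, InputStream
    from antlr4.error.ErrorListener import ConsoleErrorListener

    lexer = MyLexer(InputStream("1 + ;"))        # MyLexer: generated, assumed
    parser = MyParser(CommonTokenStream(lexer))  # MyParser: generated, assumed
    parser.removeErrorListeners()                # drop the default listeners
    parser.addErrorListener(ConsoleErrorListener.INSTANCE)
    parser.expr()  # a syntax error prints e.g. "line 1:4 ..." to stderr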
+ # + def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): + print("line " + str(line) + ":" + str(column) + " " + msg, file=sys.stderr) + +ConsoleErrorListener.INSTANCE = ConsoleErrorListener() + +class ProxyErrorListener(ErrorListener): + + def __init__(self, delegates): + super().__init__() + if delegates is None: + raise ReferenceError("delegates") + self.delegates = delegates + + def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): + for delegate in self.delegates: + delegate.syntaxError(recognizer, offendingSymbol, line, column, msg, e) + + def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs): + for delegate in self.delegates: + delegate.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + + def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs): + for delegate in self.delegates: + delegate.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + + def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs): + for delegate in self.delegates: + delegate.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py new file mode 100644 index 0000000000000000000000000000000000000000..0f7caadb240445e6d997ad582a51836f95cab5c5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py @@ -0,0 +1,709 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# +import sys +from antlr4.IntervalSet import IntervalSet + +from antlr4.Token import Token +from antlr4.atn.ATNState import ATNState +from antlr4.error.Errors import RecognitionException, NoViableAltException, InputMismatchException, \ + FailedPredicateException, ParseCancellationException + +# need forward declaration +Parser = None + +class ErrorStrategy(object): + + def reset(self, recognizer:Parser): + pass + + def recoverInline(self, recognizer:Parser): + pass + + def recover(self, recognizer:Parser, e:RecognitionException): + pass + + def sync(self, recognizer:Parser): + pass + + def inErrorRecoveryMode(self, recognizer:Parser): + pass + + def reportError(self, recognizer:Parser, e:RecognitionException): + pass + + +# This is the default implementation of {@link ANTLRErrorStrategy} used for +# error reporting and recovery in ANTLR parsers. +# +class DefaultErrorStrategy(ErrorStrategy): + + def __init__(self): + super().__init__() + # Indicates whether the error strategy is currently "recovering from an + # error". This is used to suppress reporting multiple error messages while + # attempting to recover from a detected syntax error. + # + # @see #inErrorRecoveryMode + # + self.errorRecoveryMode = False + + # The index into the input stream where the last error occurred. + # This is used to prevent infinite loops where an error is found + # but no token is consumed during recovery...another error is found, + # ad nauseum. This is a failsafe mechanism to guarantee that at least + # one token/tree node is consumed for two errors. 
+ # + self.lastErrorIndex = -1 + self.lastErrorStates = None + self.nextTokensContext = None + self.nextTokenState = 0 + + #

The default implementation simply calls {@link #endErrorCondition} to + # ensure that the handler is not in error recovery mode.

+ def reset(self, recognizer:Parser): + self.endErrorCondition(recognizer) + + # + # This method is called to enter error recovery mode when a recognition + # exception is reported. + # + # @param recognizer the parser instance + # + def beginErrorCondition(self, recognizer:Parser): + self.errorRecoveryMode = True + + def inErrorRecoveryMode(self, recognizer:Parser): + return self.errorRecoveryMode + + # + # This method is called to leave error recovery mode after recovering from + # a recognition exception. + # + # @param recognizer + # + def endErrorCondition(self, recognizer:Parser): + self.errorRecoveryMode = False + self.lastErrorStates = None + self.lastErrorIndex = -1 + + # + # {@inheritDoc} + # + #

The default implementation simply calls {@link #endErrorCondition}.

+ # + def reportMatch(self, recognizer:Parser): + self.endErrorCondition(recognizer) + + # + # {@inheritDoc} + # + #

The default implementation returns immediately if the handler is already + # in error recovery mode. Otherwise, it calls {@link #beginErrorCondition} + # and dispatches the reporting task based on the runtime type of {@code e}: + # {@link NoViableAltException} is routed to {@link #reportNoViableAlternative}, + # {@link InputMismatchException} to {@link #reportInputMismatch}, + # {@link FailedPredicateException} to {@link #reportFailedPredicate}, and + # any other type is reported directly via {@link Parser#notifyErrorListeners}.

+ # + # + # + def reportError(self, recognizer:Parser, e:RecognitionException): + # if we've already reported an error and have not matched a token + # yet successfully, don't report any errors. + if self.inErrorRecoveryMode(recognizer): + return # don't report spurious errors + self.beginErrorCondition(recognizer) + if isinstance( e, NoViableAltException ): + self.reportNoViableAlternative(recognizer, e) + elif isinstance( e, InputMismatchException ): + self.reportInputMismatch(recognizer, e) + elif isinstance( e, FailedPredicateException ): + self.reportFailedPredicate(recognizer, e) + else: + print("unknown recognition error type: " + type(e).__name__) + recognizer.notifyErrorListeners(e.message, e.offendingToken, e) + + # + # {@inheritDoc} + # + #

The default implementation resynchronizes the parser by consuming tokens + # until we find one in the resynchronization set--loosely the set of tokens + # that can follow the current rule.
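A minimal sketch of that consume-until loop over a plain token list (the runtime's consumeUntil does the same against the parser's input stream):

    def consume_until(tokens, i, resync_set):
        # skip tokens until one appears in the resynchronization set (or input ends)
        while i < len(tokens) and tokens[i] not in resync_set:
            i += 1
        return i

    print(consume_until(["x", "+", ";", "y"], 0, {";"}))  # 2: stop at the ';'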

+ # + def recover(self, recognizer:Parser, e:RecognitionException): + if self.lastErrorIndex==recognizer.getInputStream().index \ + and self.lastErrorStates is not None \ + and recognizer.state in self.lastErrorStates: + # uh oh, another error at same token index and previously-visited + # state in ATN; must be a case where LT(1) is in the recovery + # token set so nothing got consumed. Consume a single token + # at least to prevent an infinite loop; this is a failsafe. + recognizer.consume() + + self.lastErrorIndex = recognizer._input.index + if self.lastErrorStates is None: + self.lastErrorStates = [] + self.lastErrorStates.append(recognizer.state) + followSet = self.getErrorRecoverySet(recognizer) + self.consumeUntil(recognizer, followSet) + + # The default implementation of {@link ANTLRErrorStrategy#sync} makes sure + # that the current lookahead symbol is consistent with what were expecting + # at this point in the ATN. You can call this anytime but ANTLR only + # generates code to check before subrules/loops and each iteration. + # + #

Implements Jim Idle's magic sync mechanism in closures and optional + # subrules. E.g.,

+    # a : sync ( stuff sync )* ;
+    # sync : {consume to what can follow sync} ;
+    # 
+ # + # At the start of a sub rule upon error, {@link #sync} performs single + # token deletion, if possible. If it can't do that, it bails on the current + # rule and uses the default error recovery, which consumes until the + # resynchronization set of the current rule. + # + #

If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + # with an empty alternative), then the expected set includes what follows + # the subrule.

During loop iteration, it consumes until it sees a token that can start a + # sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to + # stay in the loop as long as possible.

ORIGINS

Previous versions of ANTLR did a poor job of recovering within loops. + # A single mismatched or missing token would force the parser to bail + # out of the entire rule surrounding the loop. So, for the rule

+    # classDef : 'class' ID '{' member* '}'
+    # 
+ # + # input with an extra token between members would force the parser to + # consume until it found the next class definition rather than the next + # member definition of the current class. + # + #

This functionality costs a little bit of effort because the parser has to + # compare the token set at the start of the loop and at each iteration. If for + # some reason speed is suffering for you, you can turn off this + # functionality by simply overriding this method with an empty body, as in the sketch below.
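A hypothetical override that disables the extra checks (the attribute assignment mirrors how the Python runtime installs DefaultErrorStrategy on Parser._errHandler):

    from antlr4.error.ErrorStrategy import DefaultErrorStrategy

    class NoSyncStrategy(DefaultErrorStrategy):
        def sync(self, recognizer):
            pass  # skip the token-set comparison at loop entry and iteration

    # parser._errHandler = NoSyncStrategy()  # installed on a parser instance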

+ # + def sync(self, recognizer:Parser): + # If already recovering, don't try to sync + if self.inErrorRecoveryMode(recognizer): + return + + s = recognizer._interp.atn.states[recognizer.state] + la = recognizer.getTokenStream().LA(1) + # try cheaper subset first; might get lucky. seems to shave a wee bit off + nextTokens = recognizer.atn.nextTokens(s) + if la in nextTokens: + self.nextTokensContext = None + self.nextTokenState = ATNState.INVALID_STATE_NUMBER + return + elif Token.EPSILON in nextTokens: + if self.nextTokensContext is None: + # It's possible the next token won't match information tracked + # by sync is restricted for performance. + self.nextTokensContext = recognizer._ctx + self.nextTokensState = recognizer._stateNumber + return + + if s.stateType in [ATNState.BLOCK_START, ATNState.STAR_BLOCK_START, + ATNState.PLUS_BLOCK_START, ATNState.STAR_LOOP_ENTRY]: + # report error and recover if possible + if self.singleTokenDeletion(recognizer)is not None: + return + else: + raise InputMismatchException(recognizer) + + elif s.stateType in [ATNState.PLUS_LOOP_BACK, ATNState.STAR_LOOP_BACK]: + self.reportUnwantedToken(recognizer) + expecting = recognizer.getExpectedTokens() + whatFollowsLoopIterationOrRule = expecting.addSet(self.getErrorRecoverySet(recognizer)) + self.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + + else: + # do nothing if we can't identify the exact kind of ATN state + pass + + # This is called by {@link #reportError} when the exception is a + # {@link NoViableAltException}. + # + # @see #reportError + # + # @param recognizer the parser instance + # @param e the recognition exception + # + def reportNoViableAlternative(self, recognizer:Parser, e:NoViableAltException): + tokens = recognizer.getTokenStream() + if tokens is not None: + if e.startToken.type==Token.EOF: + input = "" + else: + input = tokens.getText(e.startToken, e.offendingToken) + else: + input = "" + msg = "no viable alternative at input " + self.escapeWSAndQuote(input) + recognizer.notifyErrorListeners(msg, e.offendingToken, e) + + # + # This is called by {@link #reportError} when the exception is an + # {@link InputMismatchException}. + # + # @see #reportError + # + # @param recognizer the parser instance + # @param e the recognition exception + # + def reportInputMismatch(self, recognizer:Parser, e:InputMismatchException): + msg = "mismatched input " + self.getTokenErrorDisplay(e.offendingToken) \ + + " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames) + recognizer.notifyErrorListeners(msg, e.offendingToken, e) + + # + # This is called by {@link #reportError} when the exception is a + # {@link FailedPredicateException}. + # + # @see #reportError + # + # @param recognizer the parser instance + # @param e the recognition exception + # + def reportFailedPredicate(self, recognizer, e): + ruleName = recognizer.ruleNames[recognizer._ctx.getRuleIndex()] + msg = "rule " + ruleName + " " + e.message + recognizer.notifyErrorListeners(msg, e.offendingToken, e) + + # This method is called to report a syntax error which requires the removal + # of a token from the input stream. At the time this method is called, the + # erroneous symbol is current {@code LT(1)} symbol and has not yet been + # removed from the input stream. When this method returns, + # {@code recognizer} is in error recovery mode. + # + #

This method is called when {@link #singleTokenDeletion} identifies + # single-token deletion as a viable recovery strategy for a mismatched + # input error.

The default implementation simply returns if the handler is already in + # error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + # enter error recovery mode, followed by calling + # {@link Parser#notifyErrorListeners}.

+ # + # @param recognizer the parser instance + # + def reportUnwantedToken(self, recognizer:Parser): + if self.inErrorRecoveryMode(recognizer): + return + + self.beginErrorCondition(recognizer) + t = recognizer.getCurrentToken() + tokenName = self.getTokenErrorDisplay(t) + expecting = self.getExpectedTokens(recognizer) + msg = "extraneous input " + tokenName + " expecting " \ + + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + recognizer.notifyErrorListeners(msg, t, None) + + # This method is called to report a syntax error which requires the + # insertion of a missing token into the input stream. At the time this + # method is called, the missing token has not yet been inserted. When this + # method returns, {@code recognizer} is in error recovery mode. + # + #

This method is called when {@link #singleTokenInsertion} identifies + # single-token insertion as a viable recovery strategy for a mismatched + # input error.

The default implementation simply returns if the handler is already in + # error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + # enter error recovery mode, followed by calling + # {@link Parser#notifyErrorListeners}.

+ # + # @param recognizer the parser instance + # + def reportMissingToken(self, recognizer:Parser): + if self.inErrorRecoveryMode(recognizer): + return + self.beginErrorCondition(recognizer) + t = recognizer.getCurrentToken() + expecting = self.getExpectedTokens(recognizer) + msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) \ + + " at " + self.getTokenErrorDisplay(t) + recognizer.notifyErrorListeners(msg, t, None) + + #

The default implementation attempts to recover from the mismatched input + # by using single token insertion and deletion as described below. If the + # recovery attempt fails, this method throws an + # {@link InputMismatchException}.

EXTRA TOKEN (single token deletion)

{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the + # right token, however, then assume {@code LA(1)} is some extra spurious + # token and delete it. Then consume and return the next token (which was + # the {@code LA(2)} token) as the successful result of the match operation.

+ # + #

This recovery strategy is implemented by {@link #singleTokenDeletion}.

+ # + #

MISSING TOKEN (single token insertion)

+ # + #

If current token (at {@code LA(1)}) is consistent with what could come + # after the expected {@code LA(1)} token, then assume the token is missing + # and use the parser's {@link TokenFactory} to create it on the fly. The + # "insertion" is performed by returning the created token as the successful + # result of the match operation.

+ # + #

This recovery strategy is implemented by {@link #singleTokenInsertion}.

+ # + #

EXAMPLE

+ # + #

For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When + # the parser returns from the nested call to {@code expr}, it will have + # call chain:

+ # + #
+    # stat → expr → atom
+    # 
+ # + # and it will be trying to match the {@code ')'} at this point in the + # derivation: + # + #
+    # => ID '=' '(' INT ')' ('+' atom)* ';'
+    #                    ^
+    # 
+ # + # The attempt to match {@code ')'} will fail when it sees {@code ';'} and + # call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'} + # is in the set of tokens that can follow the {@code ')'} token reference + # in rule {@code atom}. It can assume that you forgot the {@code ')'}. + # + def recoverInline(self, recognizer:Parser): + # SINGLE TOKEN DELETION + matchedSymbol = self.singleTokenDeletion(recognizer) + if matchedSymbol is not None: + # we have deleted the extra token. + # now, move past ttype token as if all were ok + recognizer.consume() + return matchedSymbol + + # SINGLE TOKEN INSERTION + if self.singleTokenInsertion(recognizer): + return self.getMissingSymbol(recognizer) + + # even that didn't work; must throw the exception + raise InputMismatchException(recognizer) + + # + # This method implements the single-token insertion inline error recovery + # strategy. It is called by {@link #recoverInline} if the single-token + # deletion strategy fails to recover from the mismatched input. If this + # method returns {@code true}, {@code recognizer} will be in error recovery + # mode. + # + #

+ # This method determines whether or not single-token insertion is viable by checking if the {@code LA(1)} input symbol could be successfully matched if it were instead the {@code LA(2)} symbol. If this method returns {@code true}, the caller is responsible for creating and inserting a token with the correct type to produce this behavior.

+ # + # @param recognizer the parser instance + # @return {@code true} if single-token insertion is a viable recovery + # strategy for the current mismatched input, otherwise {@code false} + # + def singleTokenInsertion(self, recognizer:Parser): + currentSymbolType = recognizer.getTokenStream().LA(1) + # if current token is consistent with what could come after current + # ATN state, then we know we're missing a token; error recovery + # is free to conjure up and insert the missing token + atn = recognizer._interp.atn + currentState = atn.states[recognizer.state] + next = currentState.transitions[0].target + expectingAtLL2 = atn.nextTokens(next, recognizer._ctx) + if currentSymbolType in expectingAtLL2: + self.reportMissingToken(recognizer) + return True + else: + return False + + # This method implements the single-token deletion inline error recovery + # strategy. It is called by {@link #recoverInline} to attempt to recover + # from mismatched input. If this method returns null, the parser and error + # handler state will not have changed. If this method returns non-null, + # {@code recognizer} will not be in error recovery mode since the + # returned token was a successful match. + # + #

+ # If the single-token deletion is successful, this method calls {@link #reportUnwantedToken} to report the error, followed by {@link Parser#consume} to actually "delete" the extraneous token. Then, before returning, {@link #reportMatch} is called to signal a successful match.
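A toy model of how {@link #recoverInline} combines the two strategies: deletion is tried first (is LA(2) what we expect?), then insertion (can LA(1) legally follow the expected token?). This is illustrative only, with plain strings standing in for tokens; it is not the runtime API:

    def recover_inline_toy(la1, la2, expecting, follow_of_expected):
        if la2 in expecting:              # single-token deletion: LA(1) is spurious
            return "delete " + la1
        if la1 in follow_of_expected:     # single-token insertion: conjure the expected token
            return "insert missing token before " + la1
        raise Exception("InputMismatchException")

    print(recover_inline_toy("x", ")", {")"}, {";"}))      # delete x
    print(recover_inline_toy(";", "<EOF>", {")"}, {";"}))  # insert missing token before ;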

+ # + # @param recognizer the parser instance + # @return the successfully matched {@link Token} instance if single-token + # deletion successfully recovers from the mismatched input, otherwise + # {@code null} + # + def singleTokenDeletion(self, recognizer:Parser): + nextTokenType = recognizer.getTokenStream().LA(2) + expecting = self.getExpectedTokens(recognizer) + if nextTokenType in expecting: + self.reportUnwantedToken(recognizer) + # print("recoverFromMismatchedToken deleting " \ + # + str(recognizer.getTokenStream().LT(1)) \ + # + " since " + str(recognizer.getTokenStream().LT(2)) \ + # + " is what we want", file=sys.stderr) + recognizer.consume() # simply delete extra token + # we want to return the token we're actually matching + matchedSymbol = recognizer.getCurrentToken() + self.reportMatch(recognizer) # we know current token is correct + return matchedSymbol + else: + return None + + # Conjure up a missing token during error recovery. + # + # The recognizer attempts to recover from single missing + # symbols. But, actions might refer to that missing symbol. + # For example, x=ID {f($x);}. The action clearly assumes + # that there has been an identifier matched previously and that + # $x points at that token. If that token is missing, but + # the next token in the stream is what we want we assume that + # this token is missing and we keep going. Because we + # have to return some token to replace the missing token, + # we have to conjure one up. This method gives the user control + # over the tokens returned for missing tokens. Mostly, + # you will want to create something special for identifier + # tokens. For literals such as '{' and ',', the default + # action in the parser or tree parser works. It simply creates + # a CommonToken of the appropriate type. The text will be the token. + # If you change what tokens must be created by the lexer, + # override this method to create the appropriate tokens. + # + def getMissingSymbol(self, recognizer:Parser): + currentSymbol = recognizer.getCurrentToken() + expecting = self.getExpectedTokens(recognizer) + expectedTokenType = expecting[0] # get any element + if expectedTokenType==Token.EOF: + tokenText = "" + else: + name = None + if expectedTokenType < len(recognizer.literalNames): + name = recognizer.literalNames[expectedTokenType] + if name is None and expectedTokenType < len(recognizer.symbolicNames): + name = recognizer.symbolicNames[expectedTokenType] + tokenText = "" + current = currentSymbol + lookback = recognizer.getTokenStream().LT(-1) + if current.type==Token.EOF and lookback is not None: + current = lookback + return recognizer.getTokenFactory().create(current.source, + expectedTokenType, tokenText, Token.DEFAULT_CHANNEL, + -1, -1, current.line, current.column) + + def getExpectedTokens(self, recognizer:Parser): + return recognizer.getExpectedTokens() + + # How should a token be displayed in an error message? The default + # is to display just the text, but during development you might + # want to have a lot of information spit out. Override in that case + # to use t.toString() (which, for CommonToken, dumps everything about + # the token). This is better than forcing you to override a method in + # your token objects because you don't have to go modify your lexer + # so that it creates a new Java type. 
+ # + def getTokenErrorDisplay(self, t:Token): + if t is None: + return "" + s = t.text + if s is None: + if t.type==Token.EOF: + s = "" + else: + s = "<" + str(t.type) + ">" + return self.escapeWSAndQuote(s) + + def escapeWSAndQuote(self, s:str): + s = s.replace("\n","\\n") + s = s.replace("\r","\\r") + s = s.replace("\t","\\t") + return "'" + s + "'" + + # Compute the error recovery set for the current rule. During + # rule invocation, the parser pushes the set of tokens that can + # follow that rule reference on the stack; this amounts to + # computing FIRST of what follows the rule reference in the + # enclosing rule. See LinearApproximator.FIRST(). + # This local follow set only includes tokens + # from within the rule; i.e., the FIRST computation done by + # ANTLR stops at the end of a rule. + # + # EXAMPLE + # + # When you find a "no viable alt exception", the input is not + # consistent with any of the alternatives for rule r. The best + # thing to do is to consume tokens until you see something that + # can legally follow a call to r#or* any rule that called r. + # You don't want the exact set of viable next tokens because the + # input might just be missing a token--you might consume the + # rest of the input looking for one of the missing tokens. + # + # Consider grammar: + # + # a : '[' b ']' + # | '(' b ')' + # ; + # b : c '^' INT ; + # c : ID + # | INT + # ; + # + # At each rule invocation, the set of tokens that could follow + # that rule is pushed on a stack. Here are the various + # context-sensitive follow sets: + # + # FOLLOW(b1_in_a) = FIRST(']') = ']' + # FOLLOW(b2_in_a) = FIRST(')') = ')' + # FOLLOW(c_in_b) = FIRST('^') = '^' + # + # Upon erroneous input "[]", the call chain is + # + # a -> b -> c + # + # and, hence, the follow context stack is: + # + # depth follow set start of rule execution + # 0 a (from main()) + # 1 ']' b + # 2 '^' c + # + # Notice that ')' is not included, because b would have to have + # been called from a different context in rule a for ')' to be + # included. + # + # For error recovery, we cannot consider FOLLOW(c) + # (context-sensitive or otherwise). We need the combined set of + # all context-sensitive FOLLOW sets--the set of all tokens that + # could follow any reference in the call chain. We need to + # resync to one of those tokens. Note that FOLLOW(c)='^' and if + # we resync'd to that token, we'd consume until EOF. We need to + # sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. + # In this case, for input "[]", LA(1) is ']' and in the set, so we would + # not consume anything. After printing an error, rule c would + # return normally. Rule b would not find the required '^' though. + # At this point, it gets a mismatched token error and throws an + # exception (since LA(1) is not in the viable following token + # set). The rule exception handler tries to recover, but finds + # the same recovery set and doesn't consume anything. Rule b + # exits normally returning to rule a. Now it finds the ']' (and + # with the successful match exits errorRecovery mode). + # + # So, you can see that the parser walks up the call chain looking + # for the token that was a member of the recovery set. + # + # Errors are not generated in errorRecovery mode. 
+ # + # ANTLR's error recovery mechanism is based upon original ideas: + # + # "Algorithms + Data Structures = Programs" by Niklaus Wirth + # + # and + # + # "A note on error recovery in recursive descent parsers": + # http:#portal.acm.org/citation.cfm?id=947902.947905 + # + # Later, Josef Grosch had some good ideas: + # + # "Efficient and Comfortable Error Recovery in Recursive Descent + # Parsers": + # ftp:#www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip + # + # Like Grosch I implement context-sensitive FOLLOW sets that are combined + # at run-time upon error to avoid overhead during parsing. + # + def getErrorRecoverySet(self, recognizer:Parser): + atn = recognizer._interp.atn + ctx = recognizer._ctx + recoverSet = IntervalSet() + while ctx is not None and ctx.invokingState>=0: + # compute what follows who invoked us + invokingState = atn.states[ctx.invokingState] + rt = invokingState.transitions[0] + follow = atn.nextTokens(rt.followState) + recoverSet.addSet(follow) + ctx = ctx.parentCtx + recoverSet.removeOne(Token.EPSILON) + return recoverSet + + # Consume tokens until one matches the given token set.# + def consumeUntil(self, recognizer:Parser, set_:set): + ttype = recognizer.getTokenStream().LA(1) + while ttype != Token.EOF and not ttype in set_: + recognizer.consume() + ttype = recognizer.getTokenStream().LA(1) + + +# +# This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +# by immediately canceling the parse operation with a +# {@link ParseCancellationException}. The implementation ensures that the +# {@link ParserRuleContext#exception} field is set for all parse tree nodes +# that were not completed prior to encountering the error. +# +#

+# This error strategy is useful in the following scenarios.
+#
+# • Two-stage parsing: This error strategy allows the first stage of two-stage parsing to immediately terminate if an error is encountered, and immediately fall back to the second stage. In addition to avoiding wasted work by attempting to recover from errors here, the empty implementation of {@link BailErrorStrategy#sync} improves the performance of the first stage.
+# • Silent validation: When syntax errors are not being reported or logged, and the parse result is simply ignored if errors occur, the {@link BailErrorStrategy} avoids wasting work on recovering from errors when the result will be ignored either way.
+#
+# {@code myparser.setErrorHandler(new BailErrorStrategy());}
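That snippet is the Java form; in this Python runtime the equivalent is {@code parser.setErrorHandler(BailErrorStrategy())}. A minimal two-stage sketch, where MyLexer, MyParser and startRule are hypothetical generated names:

    from antlr4 import CommonTokenStream, InputStream
    from antlr4.error.ErrorStrategy import BailErrorStrategy, DefaultErrorStrategy
    from antlr4.error.Errors import ParseCancellationException

    def parse(text):
        # stage 1: bail on the first syntax error, doing no recovery work
        parser = MyParser(CommonTokenStream(MyLexer(InputStream(text))))
        parser.setErrorHandler(BailErrorStrategy())
        try:
            return parser.startRule()
        except ParseCancellationException:
            # stage 2: reparse with full error reporting and recovery
            parser = MyParser(CommonTokenStream(MyLexer(InputStream(text))))
            parser.setErrorHandler(DefaultErrorStrategy())
            return parser.startRule()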

+# +# @see Parser#setErrorHandler(ANTLRErrorStrategy) +# +class BailErrorStrategy(DefaultErrorStrategy): + # Instead of recovering from exception {@code e}, re-throw it wrapped + # in a {@link ParseCancellationException} so it is not caught by the + # rule function catches. Use {@link Exception#getCause()} to get the + # original {@link RecognitionException}. + # + def recover(self, recognizer:Parser, e:RecognitionException): + context = recognizer._ctx + while context is not None: + context.exception = e + context = context.parentCtx + raise ParseCancellationException(e) + + # Make sure we don't attempt to recover inline; if the parser + # successfully recovers, it won't throw an exception. + # + def recoverInline(self, recognizer:Parser): + self.recover(recognizer, InputMismatchException(recognizer)) + + # Make sure we don't attempt to recover from problems in subrules.# + def sync(self, recognizer:Parser): + pass + +del Parser \ No newline at end of file diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/Errors.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/Errors.py new file mode 100644 index 0000000000000000000000000000000000000000..e78ac05911d3c9569441fe376ff7d6c686c05c95 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/Errors.py @@ -0,0 +1,172 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# need forward declaration +Token = None +Lexer = None +Parser = None +TokenStream = None +ATNConfigSet = None +ParserRulecontext = None +PredicateTransition = None +BufferedTokenStream = None + +class UnsupportedOperationException(Exception): + + def __init__(self, msg:str): + super().__init__(msg) + +class IllegalStateException(Exception): + + def __init__(self, msg:str): + super().__init__(msg) + +class CancellationException(IllegalStateException): + + def __init__(self, msg:str): + super().__init__(msg) + +# The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +# 3 kinds of errors: prediction errors, failed predicate errors, and +# mismatched input errors. In each case, the parser knows where it is +# in the input, where it is in the ATN, the rule invocation stack, +# and what kind of problem occurred. + +from antlr4.InputStream import InputStream +from antlr4.ParserRuleContext import ParserRuleContext +from antlr4.Recognizer import Recognizer + +class RecognitionException(Exception): + + + def __init__(self, message:str=None, recognizer:Recognizer=None, input:InputStream=None, ctx:ParserRulecontext=None): + super().__init__(message) + self.message = message + self.recognizer = recognizer + self.input = input + self.ctx = ctx + # The current {@link Token} when an error occurred. Since not all streams + # support accessing symbols by index, we have to track the {@link Token} + # instance itself. + self.offendingToken = None + # Get the ATN state number the parser was in at the time the error + # occurred. For {@link NoViableAltException} and + # {@link LexerNoViableAltException} exceptions, this is the + # {@link DecisionState} number. For others, it is the state whose outgoing + # edge we couldn't match. + self.offendingState = -1 + if recognizer is not None: + self.offendingState = recognizer.state + + #

+ # If the state number is not known, this method returns -1.
+
+ # Gets the set of input symbols which could potentially follow the previously matched symbol at the time this exception was thrown.
+ #
+ # If the set of expected tokens is not known and could not be computed, this method returns {@code null}.
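For example, a caught exception can surface the expected set; a sketch assuming a parser object and its generated name tables (with the default strategy these exceptions are handled internally, so plain user code rarely catches them):

    try:
        tree = parser.startRule()                  # hypothetical start rule
    except RecognitionException as e:
        expected = e.getExpectedTokens()           # IntervalSet, or None
        if expected is not None:
            print("expected one of:",
                  expected.toString(parser.literalNames, parser.symbolicNames))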

+ # + # @return The set of token types that could potentially follow the current + # state in the ATN, or {@code null} if the information is not available. + #/ + def getExpectedTokens(self): + if self.recognizer is not None: + return self.recognizer.atn.getExpectedTokens(self.offendingState, self.ctx) + else: + return None + + +class LexerNoViableAltException(RecognitionException): + + def __init__(self, lexer:Lexer, input:InputStream, startIndex:int, deadEndConfigs:ATNConfigSet): + super().__init__(message=None, recognizer=lexer, input=input, ctx=None) + self.startIndex = startIndex + self.deadEndConfigs = deadEndConfigs + + def __str__(self): + symbol = "" + if self.startIndex >= 0 and self.startIndex < self.input.size: + symbol = self.input.getText(self.startIndex, self.startIndex) + # TODO symbol = Utils.escapeWhitespace(symbol, false); + return "LexerNoViableAltException('" + symbol + "')" + +# Indicates that the parser could not decide which of two or more paths +# to take based upon the remaining input. It tracks the starting token +# of the offending input and also knows where the parser was +# in the various paths when the error. Reported by reportNoViableAlternative() +# +class NoViableAltException(RecognitionException): + + def __init__(self, recognizer:Parser, input:TokenStream=None, startToken:Token=None, + offendingToken:Token=None, deadEndConfigs:ATNConfigSet=None, ctx:ParserRuleContext=None): + if ctx is None: + ctx = recognizer._ctx + if offendingToken is None: + offendingToken = recognizer.getCurrentToken() + if startToken is None: + startToken = recognizer.getCurrentToken() + if input is None: + input = recognizer.getInputStream() + super().__init__(recognizer=recognizer, input=input, ctx=ctx) + # Which configurations did we try at input.index() that couldn't match input.LT(1)?# + self.deadEndConfigs = deadEndConfigs + # The token object at the start index; the input stream might + # not be buffering tokens so get a reference to it. (At the + # time the error occurred, of course the stream needs to keep a + # buffer all of the tokens but later we might not have access to those.) + self.startToken = startToken + self.offendingToken = offendingToken + +# This signifies any kind of mismatched input exceptions such as +# when the current input does not match the expected token. +# +class InputMismatchException(RecognitionException): + + def __init__(self, recognizer:Parser): + super().__init__(recognizer=recognizer, input=recognizer.getInputStream(), ctx=recognizer._ctx) + self.offendingToken = recognizer.getCurrentToken() + + +# A semantic predicate failed during validation. Validation of predicates +# occurs when normally parsing the alternative just like matching a token. +# Disambiguating predicate evaluation occurs when we test a predicate during +# prediction. 
+ +class FailedPredicateException(RecognitionException): + + def __init__(self, recognizer:Parser, predicate:str=None, message:str=None): + super().__init__(message=self.formatMessage(predicate,message), recognizer=recognizer, + input=recognizer.getInputStream(), ctx=recognizer._ctx) + s = recognizer._interp.atn.states[recognizer.state] + trans = s.transitions[0] + from antlr4.atn.Transition import PredicateTransition + if isinstance(trans, PredicateTransition): + self.ruleIndex = trans.ruleIndex + self.predicateIndex = trans.predIndex + else: + self.ruleIndex = 0 + self.predicateIndex = 0 + self.predicate = predicate + self.offendingToken = recognizer.getCurrentToken() + + def formatMessage(self, predicate:str, message:str): + if message is not None: + return message + else: + return "failed predicate: {" + predicate + "}?" + +class ParseCancellationException(CancellationException): + + pass + +del Token +del Lexer +del Parser +del TokenStream +del ATNConfigSet +del ParserRulecontext +del PredicateTransition +del BufferedTokenStream diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..216c000dc5ffc8e53cc9c596e420c1e67604d1aa --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/__init__.py @@ -0,0 +1 @@ +__author__ = 'ericvergnaud' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Chunk.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Chunk.py new file mode 100644 index 0000000000000000000000000000000000000000..081419a34f65463b370b848b141192bfe491befd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Chunk.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +class Chunk(object): + pass + +class TagChunk(Chunk): + __slots__ = ('tag', 'label') + + def __init__(self, tag:str, label:str=None): + self.tag = tag + self.label = label + + def __str__(self): + if self.label is None: + return self.tag + else: + return self.label + ":" + self.tag + +class TextChunk(Chunk): + __slots__ = 'text' + + def __init__(self, text:str): + self.text = text + + def __str__(self): + return "'" + self.text + "'" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreeMatch.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreeMatch.py new file mode 100644 index 0000000000000000000000000000000000000000..c02bc0357d26b343a72307cda77ff62fe307a44a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreeMatch.py @@ -0,0 +1,118 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + + +# +# Represents the result of matching a {@link ParseTree} against a tree pattern. 
+# +from io import StringIO +from antlr4.tree.ParseTreePattern import ParseTreePattern +from antlr4.tree.Tree import ParseTree + + +class ParseTreeMatch(object): + __slots__ = ('tree', 'pattern', 'labels', 'mismatchedNode') + # + # Constructs a new instance of {@link ParseTreeMatch} from the specified + # parse tree and pattern. + # + # @param tree The parse tree to match against the pattern. + # @param pattern The parse tree pattern. + # @param labels A mapping from label names to collections of + # {@link ParseTree} objects located by the tree pattern matching process. + # @param mismatchedNode The first node which failed to match the tree + # pattern during the matching process. + # + # @exception IllegalArgumentException if {@code tree} is {@code null} + # @exception IllegalArgumentException if {@code pattern} is {@code null} + # @exception IllegalArgumentException if {@code labels} is {@code null} + # + def __init__(self, tree:ParseTree, pattern:ParseTreePattern, labels:dict, mismatchedNode:ParseTree): + if tree is None: + raise Exception("tree cannot be null") + if pattern is None: + raise Exception("pattern cannot be null") + if labels is None: + raise Exception("labels cannot be null") + self.tree = tree + self.pattern = pattern + self.labels = labels + self.mismatchedNode = mismatchedNode + + # + # Get the last node associated with a specific {@code label}. + # + #

+ # For example, for pattern {@code <id:ID>}, {@code get("id")} returns the node matched for that {@code ID}. If more than one node matched the specified label, only the last is returned. If there is no node associated with the label, this returns {@code null}.
+ #
+ # Pattern tags like {@code <ID>} and {@code <expr>} without labels are considered to be labeled with {@code ID} and {@code expr}, respectively.

+ # + # @param label The label to check. + # + # @return The last {@link ParseTree} to match a tag with the specified + # label, or {@code null} if no parse tree matched a tag with the label. + # + def get(self, label:str): + parseTrees = self.labels.get(label, None) + if parseTrees is None or len(parseTrees)==0: + return None + else: + return parseTrees[len(parseTrees)-1] + + # + # Return all nodes matching a rule or token tag with the specified label. + # + #

+ # If the {@code label} is the name of a parser rule or token in the grammar, the resulting list will contain both the parse trees matching rule or tags explicitly labeled with the label and the complete set of parse trees matching the labeled and unlabeled tags in the pattern for the parser rule or token. For example, if {@code label} is {@code "foo"}, the result will contain all of the following (a usage sketch follows the list):
+ #
+ # • Parse tree nodes matching tags of the form {@code <foo:anyRuleName>} and {@code <foo:AnyTokenName>}.
+ # • Parse tree nodes matching tags of the form {@code <anyLabel:foo>}.
+ # • Parse tree nodes matching tags of the form {@code <foo>}.
+ # + # @param label The label. + # + # @return A collection of all {@link ParseTree} nodes matching tags with + # the specified {@code label}. If no nodes matched the label, an empty list + # is returned. + # + def getAll(self, label:str): + nodes = self.labels.get(label, None) + if nodes is None: + return list() + else: + return nodes + + + # + # Gets a value indicating whether the match operation succeeded. + # + # @return {@code true} if the match operation succeeded; otherwise, + # {@code false}. + # + def succeeded(self): + return self.mismatchedNode is None + + # + # {@inheritDoc} + # + def __str__(self): + with StringIO() as buf: + buf.write("Match ") + buf.write("succeeded" if self.succeeded() else "failed") + buf.write("; found ") + buf.write(str(len(self.labels))) + buf.write(" labels") + return buf.getvalue() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePattern.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePattern.py new file mode 100644 index 0000000000000000000000000000000000000000..37fd0bf09f478d47f927b3fdf7d7a32da1c0b795 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePattern.py @@ -0,0 +1,72 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# +# A pattern like {@code = ;} converted to a {@link ParseTree} by +# {@link ParseTreePatternMatcher#compile(String, int)}. +# +from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher +from antlr4.tree.Tree import ParseTree +from antlr4.xpath.XPath import XPath + + +class ParseTreePattern(object): + __slots__ = ('matcher', 'patternRuleIndex', 'pattern', 'patternTree') + + # Construct a new instance of the {@link ParseTreePattern} class. + # + # @param matcher The {@link ParseTreePatternMatcher} which created this + # tree pattern. + # @param pattern The tree pattern in concrete syntax form. + # @param patternRuleIndex The parser rule which serves as the root of the + # tree pattern. + # @param patternTree The tree pattern in {@link ParseTree} form. + # + def __init__(self, matcher:ParseTreePatternMatcher, pattern:str, patternRuleIndex:int , patternTree:ParseTree): + self.matcher = matcher + self.patternRuleIndex = patternRuleIndex + self.pattern = pattern + self.patternTree = patternTree + + # + # Match a specific parse tree against this tree pattern. + # + # @param tree The parse tree to match against this tree pattern. + # @return A {@link ParseTreeMatch} object describing the result of the + # match operation. The {@link ParseTreeMatch#succeeded()} method can be + # used to determine whether or not the match was successful. + # + def match(self, tree:ParseTree): + return self.matcher.match(tree, self) + + # + # Determine whether or not a parse tree matches this tree pattern. + # + # @param tree The parse tree to match against this tree pattern. + # @return {@code true} if {@code tree} is a match for the current tree + # pattern; otherwise, {@code false}. + # + def matches(self, tree:ParseTree): + return self.matcher.match(tree, self).succeeded() + + # Find all nodes using XPath and then try to match those subtrees against + # this tree pattern. + # + # @param tree The {@link ParseTree} to match against this pattern. 
+ # @param xpath An expression matching the nodes + # + # @return A collection of {@link ParseTreeMatch} objects describing the + # successful matches. Unsuccessful matches are omitted from the result, + # regardless of the reason for the failure. + # + def findAll(self, tree:ParseTree, xpath:str): + subtrees = XPath.findAll(tree, xpath, self.matcher.parser) + matches = list() + for t in subtrees: + match = self.match(t) + if match.succeeded(): + matches.append(match) + return matches diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePatternMatcher.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePatternMatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..62fd197b0d143393fa187ead9b0c576112b486be --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePatternMatcher.py @@ -0,0 +1,374 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# +# A tree pattern matching mechanism for ANTLR {@link ParseTree}s. +# +#

+# Patterns are strings of source input text with special tags representing token or rule references such as:
+#
+# {@code <ID> = <expr>;}
+#
+# Given a pattern start rule such as {@code statement}, this object constructs a {@link ParseTree} with placeholders for the {@code ID} and {@code expr} subtree. Then the {@link #match} routines can compare an actual {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches any {@code ID} token and tag {@code <expr>} references the result of the {@code expr} rule (generally an instance of {@code ExprContext}).
+#
+# Pattern {@code x = 0;} is a similar pattern that matches the same pattern except that it requires the identifier to be {@code x} and the expression to be {@code 0}.
+#
+# The {@link #matches} routines return {@code true} or {@code false} based upon a match for the tree rooted at the parameter sent in. The {@link #match} routines return a {@link ParseTreeMatch} object that contains the parse tree, the parse tree pattern, and a map from tag name to matched nodes (more below). A subtree that fails to match returns with {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not match.
+#
+# For efficiency, you can compile a tree pattern in string form to a {@link ParseTreePattern} object.
+#
+# See {@code TestParseTreeMatcher} for lots of examples. {@link ParseTreePattern} has two static helper methods: {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that are easy to use but not super efficient because they create new {@link ParseTreePatternMatcher} objects each time and have to compile the pattern in string form before using it.
+#
+# The lexer and parser that you pass into the {@link ParseTreePatternMatcher} constructor are used to parse the pattern in string form. The lexer converts the {@code <ID> = <expr>;} into a sequence of four tokens (assuming the lexer throws out whitespace or puts it on a hidden channel). Be aware that the input stream is reset for the lexer (but not the parser; a {@link ParserInterpreter} is created to parse the input). Any user-defined fields you have put into the lexer might get changed when this mechanism asks it to scan the pattern string.
+#
+# Normally a parser does not accept token {@code <expr>} as a valid {@code expr} but, from the parser passed in, we create a special version of the underlying grammar representation (an {@link ATN}) that allows imaginary tokens representing rules ({@code <expr>}) to match entire rules. We call these bypass alternatives.
+#
+# Delimiters are {@code <} and {@code >}, with {@code \} as the escape string by default, but you can set them to whatever you want using {@link #setDelimiters}. You must escape both start and stop strings {@code \<} and {@code \>}.
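A minimal end-to-end sketch with the methods defined below; MyLexer, MyParser, RULE_statement and the existing parse tree {@code tree} are hypothetical names from a generated parser:

    from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher

    matcher = ParseTreePatternMatcher(MyLexer(None), parser)
    pattern = matcher.compileTreePattern("<ID> = <expr>;", MyParser.RULE_statement)
    m = matcher.matchPattern(tree, pattern)        # a ParseTreeMatch
    print(m.succeeded())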

+# +from antlr4.CommonTokenStream import CommonTokenStream +from antlr4.InputStream import InputStream +from antlr4.ParserRuleContext import ParserRuleContext +from antlr4.Lexer import Lexer +from antlr4.ListTokenSource import ListTokenSource +from antlr4.Token import Token +from antlr4.error.ErrorStrategy import BailErrorStrategy +from antlr4.error.Errors import RecognitionException, ParseCancellationException +from antlr4.tree.Chunk import TagChunk, TextChunk +from antlr4.tree.RuleTagToken import RuleTagToken +from antlr4.tree.TokenTagToken import TokenTagToken +from antlr4.tree.Tree import ParseTree, TerminalNode, RuleNode + +# need forward declaration +Parser = None +ParseTreePattern = None + +class CannotInvokeStartRule(Exception): + + def __init__(self, e:Exception): + super().__init__(e) + +class StartRuleDoesNotConsumeFullPattern(Exception): + + pass + + +class ParseTreePatternMatcher(object): + __slots__ = ('lexer', 'parser', 'start', 'stop', 'escape') + + # Constructs a {@link ParseTreePatternMatcher} or from a {@link Lexer} and + # {@link Parser} object. The lexer input stream is altered for tokenizing + # the tree patterns. The parser is used as a convenient mechanism to get + # the grammar name, plus token, rule names. + def __init__(self, lexer:Lexer, parser:Parser): + self.lexer = lexer + self.parser = parser + self.start = "<" + self.stop = ">" + self.escape = "\\" # e.g., \< and \> must escape BOTH! + + # Set the delimiters used for marking rule and token tags within concrete + # syntax used by the tree pattern parser. + # + # @param start The start delimiter. + # @param stop The stop delimiter. + # @param escapeLeft The escape sequence to use for escaping a start or stop delimiter. + # + # @exception IllegalArgumentException if {@code start} is {@code null} or empty. + # @exception IllegalArgumentException if {@code stop} is {@code null} or empty. + # + def setDelimiters(self, start:str, stop:str, escapeLeft:str): + if start is None or len(start)==0: + raise Exception("start cannot be null or empty") + if stop is None or len(stop)==0: + raise Exception("stop cannot be null or empty") + self.start = start + self.stop = stop + self.escape = escapeLeft + + # Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}?# + def matchesRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int): + p = self.compileTreePattern(pattern, patternRuleIndex) + return self.matches(tree, p) + + # Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a + # compiled pattern instead of a string representation of a tree pattern. + # + def matchesPattern(self, tree:ParseTree, pattern:ParseTreePattern): + mismatchedNode = self.matchImpl(tree, pattern.patternTree, dict()) + return mismatchedNode is None + + # + # Compare {@code pattern} matched as rule {@code patternRuleIndex} against + # {@code tree} and return a {@link ParseTreeMatch} object that contains the + # matched elements, or the node at which the match failed. + # + def matchRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int): + p = self.compileTreePattern(pattern, patternRuleIndex) + return self.matchPattern(tree, p) + + # + # Compare {@code pattern} matched against {@code tree} and return a + # {@link ParseTreeMatch} object that contains the matched elements, or the + # node at which the match failed. Pass in a compiled pattern instead of a + # string representation of a tree pattern. 
+ # + def matchPattern(self, tree:ParseTree, pattern:ParseTreePattern): + labels = dict() + mismatchedNode = self.matchImpl(tree, pattern.patternTree, labels) + from antlr4.tree.ParseTreeMatch import ParseTreeMatch + return ParseTreeMatch(tree, pattern, labels, mismatchedNode) + + # + # For repeated use of a tree pattern, compile it to a + # {@link ParseTreePattern} using this method. + # + def compileTreePattern(self, pattern:str, patternRuleIndex:int): + tokenList = self.tokenize(pattern) + tokenSrc = ListTokenSource(tokenList) + tokens = CommonTokenStream(tokenSrc) + from antlr4.ParserInterpreter import ParserInterpreter + parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames, + self.parser.ruleNames, self.parser.getATNWithBypassAlts(),tokens) + tree = None + try: + parserInterp.setErrorHandler(BailErrorStrategy()) + tree = parserInterp.parse(patternRuleIndex) + except ParseCancellationException as e: + raise e.cause + except RecognitionException as e: + raise e + except Exception as e: + raise CannotInvokeStartRule(e) + + # Make sure tree pattern compilation checks for a complete parse + if tokens.LA(1)!=Token.EOF: + raise StartRuleDoesNotConsumeFullPattern() + + from antlr4.tree.ParseTreePattern import ParseTreePattern + return ParseTreePattern(self, pattern, patternRuleIndex, tree) + + # + # Recursively walk {@code tree} against {@code patternTree}, filling + # {@code match.}{@link ParseTreeMatch#labels labels}. + # + # @return the first node encountered in {@code tree} which does not match + # a corresponding node in {@code patternTree}, or {@code null} if the match + # was successful. The specific node returned depends on the matching + # algorithm used by the implementation, and may be overridden. + # + def matchImpl(self, tree:ParseTree, patternTree:ParseTree, labels:dict): + if tree is None: + raise Exception("tree cannot be null") + if patternTree is None: + raise Exception("patternTree cannot be null") + + # x and , x and y, or x and x; or could be mismatched types + if isinstance(tree, TerminalNode) and isinstance(patternTree, TerminalNode ): + mismatchedNode = None + # both are tokens and they have same type + if tree.symbol.type == patternTree.symbol.type: + if isinstance( patternTree.symbol, TokenTagToken ): # x and + tokenTagToken = patternTree.symbol + # track label->list-of-nodes for both token name and label (if any) + self.map(labels, tokenTagToken.tokenName, tree) + if tokenTagToken.label is not None: + self.map(labels, tokenTagToken.label, tree) + elif tree.getText()==patternTree.getText(): + # x and x + pass + else: + # x and y + if mismatchedNode is None: + mismatchedNode = tree + else: + if mismatchedNode is None: + mismatchedNode = tree + + return mismatchedNode + + if isinstance(tree, ParserRuleContext) and isinstance(patternTree, ParserRuleContext): + mismatchedNode = None + # (expr ...) and + ruleTagToken = self.getRuleTagToken(patternTree) + if ruleTagToken is not None: + m = None + if tree.ruleContext.ruleIndex == patternTree.ruleContext.ruleIndex: + # track label->list-of-nodes for both rule name and label (if any) + self.map(labels, ruleTagToken.ruleName, tree) + if ruleTagToken.label is not None: + self.map(labels, ruleTagToken.label, tree) + else: + if mismatchedNode is None: + mismatchedNode = tree + + return mismatchedNode + + # (expr ...) and (expr ...) 
+ if tree.getChildCount()!=patternTree.getChildCount(): + if mismatchedNode is None: + mismatchedNode = tree + return mismatchedNode + + n = tree.getChildCount() + for i in range(0, n): + childMatch = self.matchImpl(tree.getChild(i), patternTree.getChild(i), labels) + if childMatch is not None: + return childMatch + + return mismatchedNode + + # if nodes aren't both tokens or both rule nodes, can't match + return tree + + def map(self, labels, label, tree): + v = labels.get(label, None) + if v is None: + v = list() + labels[label] = v + v.append(tree) + + # Is {@code t} {@code (expr )} subtree?# + def getRuleTagToken(self, tree:ParseTree): + if isinstance( tree, RuleNode ): + if tree.getChildCount()==1 and isinstance(tree.getChild(0), TerminalNode ): + c = tree.getChild(0) + if isinstance( c.symbol, RuleTagToken ): + return c.symbol + return None + + def tokenize(self, pattern:str): + # split pattern into chunks: sea (raw input) and islands (, ) + chunks = self.split(pattern) + + # create token stream from text and tags + tokens = list() + for chunk in chunks: + if isinstance( chunk, TagChunk ): + # add special rule token or conjure up new token from name + if chunk.tag[0].isupper(): + ttype = self.parser.getTokenType(chunk.tag) + if ttype==Token.INVALID_TYPE: + raise Exception("Unknown token " + str(chunk.tag) + " in pattern: " + pattern) + tokens.append(TokenTagToken(chunk.tag, ttype, chunk.label)) + elif chunk.tag[0].islower(): + ruleIndex = self.parser.getRuleIndex(chunk.tag) + if ruleIndex==-1: + raise Exception("Unknown rule " + str(chunk.tag) + " in pattern: " + pattern) + ruleImaginaryTokenType = self.parser.getATNWithBypassAlts().ruleToTokenType[ruleIndex] + tokens.append(RuleTagToken(chunk.tag, ruleImaginaryTokenType, chunk.label)) + else: + raise Exception("invalid tag: " + str(chunk.tag) + " in pattern: " + pattern) + else: + self.lexer.setInputStream(InputStream(chunk.text)) + t = self.lexer.nextToken() + while t.type!=Token.EOF: + tokens.append(t) + t = self.lexer.nextToken() + return tokens + + # Split {@code = ;} into 4 chunks for tokenizing by {@link #tokenize}.# + def split(self, pattern:str): + p = 0 + n = len(pattern) + chunks = list() + # find all start and stop indexes first, then collect + starts = list() + stops = list() + while p < n : + if p == pattern.find(self.escape + self.start, p): + p += len(self.escape) + len(self.start) + elif p == pattern.find(self.escape + self.stop, p): + p += len(self.escape) + len(self.stop) + elif p == pattern.find(self.start, p): + starts.append(p) + p += len(self.start) + elif p == pattern.find(self.stop, p): + stops.append(p) + p += len(self.stop) + else: + p += 1 + + nt = len(starts) + + if nt > len(stops): + raise Exception("unterminated tag in pattern: " + pattern) + if nt < len(stops): + raise Exception("missing start tag in pattern: " + pattern) + + for i in range(0, nt): + if starts[i] >= stops[i]: + raise Exception("tag delimiters out of order in pattern: " + pattern) + + # collect into chunks now + if nt==0: + chunks.append(TextChunk(pattern)) + + if nt>0 and starts[0]>0: # copy text up to first tag into chunks + text = pattern[0:starts[0]] + chunks.add(TextChunk(text)) + + for i in range(0, nt): + # copy inside of + tag = pattern[starts[i] + len(self.start) : stops[i]] + ruleOrToken = tag + label = None + colon = tag.find(':') + if colon >= 0: + label = tag[0:colon] + ruleOrToken = tag[colon+1 : len(tag)] + chunks.append(TagChunk(label, ruleOrToken)) + if i+1 < len(starts): + # copy from end of to start of next + text = 
pattern[stops[i] + len(self.stop) : starts[i + 1]] + chunks.append(TextChunk(text)) + + if nt > 0 : + afterLastTag = stops[nt - 1] + len(self.stop) + if afterLastTag < n : # copy text from end of last tag to end + text = pattern[afterLastTag : n] + chunks.append(TextChunk(text)) + + # strip out the escape sequences from text chunks but not tags + for i in range(0, len(chunks)): + c = chunks[i] + if isinstance( c, TextChunk ): + unescaped = c.text.replace(self.escape, "") + if len(unescaped) < len(c.text): + chunks[i] = TextChunk(unescaped) + return chunks diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/RuleTagToken.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/RuleTagToken.py new file mode 100644 index 0000000000000000000000000000000000000000..a198f7da13643d538ce96aeeb6a8ff4f757f1ecd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/RuleTagToken.py @@ -0,0 +1,50 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# +# A {@link Token} object representing an entire subtree matched by a parser +# rule; e.g., {@code }. These tokens are created for {@link TagChunk} +# chunks where the tag corresponds to a parser rule. +# +from antlr4.Token import Token + + +class RuleTagToken(Token): + __slots__ = ('label', 'ruleName') + # + # Constructs a new instance of {@link RuleTagToken} with the specified rule + # name, bypass token type, and label. + # + # @param ruleName The name of the parser rule this rule tag matches. + # @param bypassTokenType The bypass token type assigned to the parser rule. + # @param label The label associated with the rule tag, or {@code null} if + # the rule tag is unlabeled. + # + # @exception IllegalArgumentException if {@code ruleName} is {@code null} + # or empty. + + def __init__(self, ruleName:str, bypassTokenType:int, label:str=None): + if ruleName is None or len(ruleName)==0: + raise Exception("ruleName cannot be null or empty.") + self.source = None + self.type = bypassTokenType # token type of the token + self.channel = Token.DEFAULT_CHANNEL # The parser ignores everything not on DEFAULT_CHANNEL + self.start = -1 # optional; return -1 if not implemented. + self.stop = -1 # optional; return -1 if not implemented. + self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream + self.line = 0 # line=1..n of the 1st character + self.column = -1 # beginning of the line at which it occurs, 0..n-1 + self.label = label + self._text = self.getText() # text of the token. + + self.ruleName = ruleName + + + def getText(self): + if self.label is None: + return "<" + self.ruleName + ">" + else: + return "<" + self.label + ":" + self.ruleName + ">" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/TokenTagToken.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/TokenTagToken.py new file mode 100644 index 0000000000000000000000000000000000000000..b7beeb87684c06606e17053f0f74fcae36876959 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/TokenTagToken.py @@ -0,0 +1,47 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# +# A {@link Token} object representing a token of a particular type; e.g., +# {@code }. These tokens are created for {@link TagChunk} chunks where the +# tag corresponds to a lexer rule or token type. +# +from antlr4.Token import CommonToken + + +class TokenTagToken(CommonToken): + __slots__ = ('tokenName', 'label') + # Constructs a new instance of {@link TokenTagToken} with the specified + # token name, type, and label. + # + # @param tokenName The token name. + # @param type The token type. + # @param label The label associated with the token tag, or {@code null} if + # the token tag is unlabeled. + # + def __init__(self, tokenName:str, type:int, label:str=None): + super().__init__(type=type) + self.tokenName = tokenName + self.label = label + self._text = self.getText() + + # + # {@inheritDoc} + # + #

+ # The implementation for {@link TokenTagToken} returns the token tag formatted with {@code <} and {@code >} delimiters.
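A tiny concrete check of that formatting (the token type 5 is arbitrary here):

    from antlr4.tree.TokenTagToken import TokenTagToken

    t = TokenTagToken("ID", 5, label="name")
    print(t.getText())   # <name:ID>
    print(str(t))        # ID:5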

+ # + def getText(self): + if self.label is None: + return "<" + self.tokenName + ">" + else: + return "<" + self.label + ":" + self.tokenName + ">" + + #

+ # The implementation for {@link TokenTagToken} returns a string of the form {@code tokenName:type}.

+ # + def __str__(self): + return self.tokenName + ":" + str(self.type) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py new file mode 100644 index 0000000000000000000000000000000000000000..812acc96bbee97860bc8a914feedcd0584def050 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py @@ -0,0 +1,191 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + + +# The basic notion of a tree has a parent, a payload, and a list of children. +# It is the most abstract interface for all the trees used by ANTLR. +#/ +from antlr4.Token import Token + +INVALID_INTERVAL = (-1, -2) + +class Tree(object): + pass + +class SyntaxTree(Tree): + pass + +class ParseTree(SyntaxTree): + pass + +class RuleNode(ParseTree): + pass + +class TerminalNode(ParseTree): + pass + +class ErrorNode(TerminalNode): + pass + +class ParseTreeVisitor(object): + def visit(self, tree): + return tree.accept(self) + + def visitChildren(self, node): + result = self.defaultResult() + n = node.getChildCount() + for i in range(n): + if not self.shouldVisitNextChild(node, result): + return result + + c = node.getChild(i) + childResult = c.accept(self) + result = self.aggregateResult(result, childResult) + + return result + + def visitTerminal(self, node): + return self.defaultResult() + + def visitErrorNode(self, node): + return self.defaultResult() + + def defaultResult(self): + return None + + def aggregateResult(self, aggregate, nextResult): + return nextResult + + def shouldVisitNextChild(self, node, currentResult): + return True + +ParserRuleContext = None + +class ParseTreeListener(object): + + def visitTerminal(self, node:TerminalNode): + pass + + def visitErrorNode(self, node:ErrorNode): + pass + + def enterEveryRule(self, ctx:ParserRuleContext): + pass + + def exitEveryRule(self, ctx:ParserRuleContext): + pass + +del ParserRuleContext + +class TerminalNodeImpl(TerminalNode): + __slots__ = ('parentCtx', 'symbol') + + def __init__(self, symbol:Token): + self.parentCtx = None + self.symbol = symbol + def __setattr__(self, key, value): + super().__setattr__(key, value) + + def getChild(self, i:int): + return None + + def getSymbol(self): + return self.symbol + + def getParent(self): + return self.parentCtx + + def getPayload(self): + return self.symbol + + def getSourceInterval(self): + if self.symbol is None: + return INVALID_INTERVAL + tokenIndex = self.symbol.tokenIndex + return (tokenIndex, tokenIndex) + + def getChildCount(self): + return 0 + + def accept(self, visitor:ParseTreeVisitor): + return visitor.visitTerminal(self) + + def getText(self): + return self.symbol.text + + def __str__(self): + if self.symbol.type == Token.EOF: + return "" + else: + return self.symbol.text + +# Represents a token that was consumed during resynchronization +# rather than during a valid match operation. For example, +# we will create this kind of a node during single token insertion +# and deletion as well as during "consume until error recovery set" +# upon no viable alternative exceptions. 
+ +class ErrorNodeImpl(TerminalNodeImpl,ErrorNode): + + def __init__(self, token:Token): + super().__init__(token) + + def accept(self, visitor:ParseTreeVisitor): + return visitor.visitErrorNode(self) + + +class ParseTreeWalker(object): + + DEFAULT = None + + def walk(self, listener:ParseTreeListener, t:ParseTree): + """ + Performs a walk on the given parse tree starting at the root and going down recursively + with depth-first search. On each node, {@link ParseTreeWalker#enterRule} is called before + recursively walking down into child nodes, then + {@link ParseTreeWalker#exitRule} is called after the recursive call to wind up. + @param listener The listener used by the walker to process grammar rules + @param t The parse tree to be walked on + """ + if isinstance(t, ErrorNode): + listener.visitErrorNode(t) + return + elif isinstance(t, TerminalNode): + listener.visitTerminal(t) + return + self.enterRule(listener, t) + for child in t.getChildren(): + self.walk(listener, child) + self.exitRule(listener, t) + + # + # The discovery of a rule node, involves sending two events: the generic + # {@link ParseTreeListener#enterEveryRule} and a + # {@link RuleContext}-specific event. First we trigger the generic and then + # the rule specific. We to them in reverse order upon finishing the node. + # + def enterRule(self, listener:ParseTreeListener, r:RuleNode): + """ + Enters a grammar rule by first triggering the generic event {@link ParseTreeListener#enterEveryRule} + then by triggering the event specific to the given parse tree node + @param listener The listener responding to the trigger events + @param r The grammar rule containing the rule context + """ + ctx = r.getRuleContext() + listener.enterEveryRule(ctx) + ctx.enterRule(listener) + + def exitRule(self, listener:ParseTreeListener, r:RuleNode): + """ + Exits a grammar rule by first triggering the event specific to the given parse tree node + then by triggering the generic event {@link ParseTreeListener#exitEveryRule} + @param listener The listener responding to the trigger events + @param r The grammar rule containing the rule context + """ + ctx = r.getRuleContext() + ctx.exitRule(listener) + listener.exitEveryRule(ctx) + +ParseTreeWalker.DEFAULT = ParseTreeWalker() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Trees.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Trees.py new file mode 100644 index 0000000000000000000000000000000000000000..686b8cb287b3058c2e0b33dfb1567320299f214e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Trees.py @@ -0,0 +1,111 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + + +# A set of utility routines useful for all kinds of ANTLR trees.# +from io import StringIO +from antlr4.Token import Token +from antlr4.Utils import escapeWhitespace +from antlr4.tree.Tree import RuleNode, ErrorNode, TerminalNode, Tree, ParseTree + +# need forward declaration +Parser = None + +class Trees(object): + + # Print out a whole tree in LISP form. {@link #getNodeText} is used on the + # node payloads to get the text for the nodes. Detect + # parse trees and extract data appropriately. 
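Tree.py and Trees.py together cover traversal and printing. A minimal sketch, assuming an existing parse tree {@code tree} and the {@code parser} that produced it:

    from antlr4.tree.Tree import ParseTreeListener, ParseTreeWalker
    from antlr4.tree.Trees import Trees

    class RulePrinter(ParseTreeListener):
        def enterEveryRule(self, ctx):                 # fires before each rule's children
            print("enter", type(ctx).__name__)

    ParseTreeWalker.DEFAULT.walk(RulePrinter(), tree)  # depth-first listener walk
    print(Trees.toStringTree(tree, recog=parser))      # whole tree in LISP form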
+ @classmethod + def toStringTree(cls, t:Tree, ruleNames:list=None, recog:Parser=None): + if recog is not None: + ruleNames = recog.ruleNames + s = escapeWhitespace(cls.getNodeText(t, ruleNames), False) + if t.getChildCount()==0: + return s + with StringIO() as buf: + buf.write("(") + buf.write(s) + buf.write(' ') + for i in range(0, t.getChildCount()): + if i > 0: + buf.write(' ') + buf.write(cls.toStringTree(t.getChild(i), ruleNames)) + buf.write(")") + return buf.getvalue() + + @classmethod + def getNodeText(cls, t:Tree, ruleNames:list=None, recog:Parser=None): + if recog is not None: + ruleNames = recog.ruleNames + if ruleNames is not None: + if isinstance(t, RuleNode): + if t.getAltNumber()!=0: # should use ATN.INVALID_ALT_NUMBER but won't compile + return ruleNames[t.getRuleIndex()]+":"+str(t.getAltNumber()) + return ruleNames[t.getRuleIndex()] + elif isinstance( t, ErrorNode): + return str(t) + elif isinstance(t, TerminalNode): + if t.symbol is not None: + return t.symbol.text + # no recog for rule names + payload = t.getPayload() + if isinstance(payload, Token ): + return payload.text + return str(t.getPayload()) + + + # Return ordered list of all children of this node + @classmethod + def getChildren(cls, t:Tree): + return [ t.getChild(i) for i in range(0, t.getChildCount()) ] + + # Return a list of all ancestors of this node. The first node of + # list is the root and the last is the parent of this node. + # + @classmethod + def getAncestors(cls, t:Tree): + ancestors = [] + t = t.getParent() + while t is not None: + ancestors.insert(0, t) # insert at start + t = t.getParent() + return ancestors + + @classmethod + def findAllTokenNodes(cls, t:ParseTree, ttype:int): + return cls.findAllNodes(t, ttype, True) + + @classmethod + def findAllRuleNodes(cls, t:ParseTree, ruleIndex:int): + return cls.findAllNodes(t, ruleIndex, False) + + @classmethod + def findAllNodes(cls, t:ParseTree, index:int, findTokens:bool): + nodes = [] + cls._findAllNodes(t, index, findTokens, nodes) + return nodes + + @classmethod + def _findAllNodes(cls, t:ParseTree, index:int, findTokens:bool, nodes:list): + from antlr4.ParserRuleContext import ParserRuleContext + # check this node (the root) first + if findTokens and isinstance(t, TerminalNode): + if t.symbol.type==index: + nodes.append(t) + elif not findTokens and isinstance(t, ParserRuleContext): + if t.ruleIndex == index: + nodes.append(t) + # check children + for i in range(0, t.getChildCount()): + cls._findAllNodes(t.getChild(i), index, findTokens, nodes) + + @classmethod + def descendants(cls, t:ParseTree): + nodes = [t] + for i in range(0, t.getChildCount()): + nodes.extend(cls.descendants(t.getChild(i))) + return nodes diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Chunk.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Chunk.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f654f77d305451e7adf490f72452f8455f53ed56 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Chunk.cpython-38.pyc differ diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f87bf11601a645bedd0b9fb4508b4e93cbd69358 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Tree.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Tree.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ad2bd6b9afaa03e481ff9387cb9ba720d9f2aa0 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Tree.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/XPath.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/XPath.py new file mode 100644 index 0000000000000000000000000000000000000000..24029e7cf2bfa9798538211bc7599b49dbb70191 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/XPath.py @@ -0,0 +1,352 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# +# Represent a subset of XPath XML path syntax for use in identifying nodes in +# parse trees. +# +#

+# Split path into words and separators {@code /} and {@code //} via ANTLR
+# itself then walk path elements from left to right. At each separator-word
+# pair, find set of nodes. Next stage uses those as work list.
+#
+# The basic interface is
+# {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
+# But that is just shorthand for:
+#
+#     {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
+#     return p.{@link #evaluate evaluate}(tree);
+#
+# See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
+# allows operators:
+#
+#     /    root
+#     //   anywhere
+#     !    invert; this must appear directly after root or anywhere operator
+#
+# and path elements:
+#
+#     ID        token name
+#     'string'  any string literal token from the grammar
+#     expr      rule name
+#     *         wildcard matching any node
+#
+# Whitespace is not allowed.

+# +from antlr4 import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode +from antlr4.InputStream import InputStream +from antlr4.Parser import Parser +from antlr4.RuleContext import RuleContext +from antlr4.Token import Token +from antlr4.atn.ATNDeserializer import ATNDeserializer +from antlr4.error.ErrorListener import ErrorListener +from antlr4.error.Errors import LexerNoViableAltException +from antlr4.tree.Tree import ParseTree +from antlr4.tree.Trees import Trees +from io import StringIO + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\n") + buf.write("\64\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t") + buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5") + buf.write("\3\6\3\6\7\6\37\n\6\f\6\16\6\"\13\6\3\6\3\6\3\7\3\7\5") + buf.write("\7(\n\7\3\b\3\b\3\t\3\t\7\t.\n\t\f\t\16\t\61\13\t\3\t") + buf.write("\3\t\3/\2\n\3\5\5\6\7\7\t\b\13\t\r\2\17\2\21\n\3\2\4\7") + buf.write("\2\62;aa\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c") + buf.write("|\u00c2\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381") + buf.write("\u2001\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801") + buf.write("\uf902\ufdd1\ufdf2\uffff\64\2\3\3\2\2\2\2\5\3\2\2\2\2") + buf.write("\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\21\3\2\2\2\3\23") + buf.write("\3\2\2\2\5\26\3\2\2\2\7\30\3\2\2\2\t\32\3\2\2\2\13\34") + buf.write("\3\2\2\2\r\'\3\2\2\2\17)\3\2\2\2\21+\3\2\2\2\23\24\7\61") + buf.write("\2\2\24\25\7\61\2\2\25\4\3\2\2\2\26\27\7\61\2\2\27\6\3") + buf.write("\2\2\2\30\31\7,\2\2\31\b\3\2\2\2\32\33\7#\2\2\33\n\3\2") + buf.write("\2\2\34 \5\17\b\2\35\37\5\r\7\2\36\35\3\2\2\2\37\"\3\2") + buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!#\3\2\2\2\" \3\2\2\2#$\b\6") + buf.write("\2\2$\f\3\2\2\2%(\5\17\b\2&(\t\2\2\2\'%\3\2\2\2\'&\3\2") + buf.write("\2\2(\16\3\2\2\2)*\t\3\2\2*\20\3\2\2\2+/\7)\2\2,.\13\2") + buf.write("\2\2-,\3\2\2\2.\61\3\2\2\2/\60\3\2\2\2/-\3\2\2\2\60\62") + buf.write("\3\2\2\2\61/\3\2\2\2\62\63\7)\2\2\63\22\3\2\2\2\6\2 \'") + buf.write("/\3\3\6\2") + return buf.getvalue() + + +class XPathLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + + TOKEN_REF = 1 + RULE_REF = 2 + ANYWHERE = 3 + ROOT = 4 + WILDCARD = 5 + BANG = 6 + ID = 7 + STRING = 8 + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "", + "'//'", "'/'", "'*'", "'!'" ] + + symbolicNames = [ "", + "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG", + "ID", "STRING" ] + + ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar", + "NameStartChar", "STRING" ] + + grammarFileName = "XPathLexer.g4" + + def __init__(self, input=None): + super().__init__(input) + self.checkVersion("4.9.1") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + + def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int): + if self._actions is None: + actions = dict() + actions[4] = self.ID_action + self._actions = actions + _action = self._actions.get(ruleIndex, None) + if _action is not None: + _action(localctx, actionIndex) + else: + raise Exception("No registered action for: %d" % ruleIndex) + + def ID_action(self, localctx:RuleContext , actionIndex:int): + if actionIndex == 0: + char = self.text[0] + if char.isupper(): + self.type = XPathLexer.TOKEN_REF + else: + self.type = XPathLexer.RULE_REF + +class 
XPath(object): + + WILDCARD = "*" # word not operator/separator + NOT = "!" # word for invert operator + + def __init__(self, parser:Parser, path:str): + self.parser = parser + self.path = path + self.elements = self.split(path) + + def split(self, path:str): + input = InputStream(path) + lexer = XPathLexer(input) + def recover(self, e): + raise e + lexer.recover = recover + lexer.removeErrorListeners() + lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more + tokenStream = CommonTokenStream(lexer) + try: + tokenStream.fill() + except LexerNoViableAltException as e: + pos = lexer.column + msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path) + raise Exception(msg, e) + + tokens = iter(tokenStream.tokens) + elements = list() + for el in tokens: + invert = False + anywhere = False + # Check for path separators, if none assume root + if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]: + anywhere = el.type == XPathLexer.ANYWHERE + next_el = next(tokens, None) + if not next_el: + raise Exception('Missing element after %s' % el.getText()) + else: + el = next_el + # Check for bangs + if el.type == XPathLexer.BANG: + invert = True + next_el = next(tokens, None) + if not next_el: + raise Exception('Missing element after %s' % el.getText()) + else: + el = next_el + # Add searched element + if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]: + element = self.getXPathElement(el, anywhere) + element.invert = invert + elements.append(element) + elif el.type==Token.EOF: + break + else: + raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type]) + return elements + + # + # Convert word like {@code#} or {@code ID} or {@code expr} to a path + # element. {@code anywhere} is {@code true} if {@code //} precedes the + # word. + # + def getXPathElement(self, wordToken:Token, anywhere:bool): + if wordToken.type==Token.EOF: + raise Exception("Missing path element at end of path") + + word = wordToken.text + if wordToken.type==XPathLexer.WILDCARD : + return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement() + + elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]: + tsource = self.parser.getTokenStream().tokenSource + + ttype = Token.INVALID_TYPE + if wordToken.type == XPathLexer.TOKEN_REF: + if word in tsource.ruleNames: + ttype = tsource.ruleNames.index(word) + 1 + else: + if word in tsource.literalNames: + ttype = tsource.literalNames.index(word) + + if ttype == Token.INVALID_TYPE: + raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex)) + return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype) + + else: + ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1 + + if ruleIndex == -1: + raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex)) + return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex) + + + @staticmethod + def findAll(tree:ParseTree, xpath:str, parser:Parser): + p = XPath(parser, xpath) + return p.evaluate(tree) + + # + # Return a list of all nodes starting at {@code t} as root that satisfy the + # path. The root {@code /} is relative to the node passed to + # {@link #evaluate}. + # + def evaluate(self, t:ParseTree): + dummyRoot = ParserRuleContext() + dummyRoot.children = [t] # don't set t's parent. 
+ + work = [dummyRoot] + for element in self.elements: + work_next = list() + for node in work: + if not isinstance(node, TerminalNode) and node.children: + # only try to match next element if it has children + # e.g., //func/*/stat might have a token node for which + # we can't go looking for stat nodes. + matching = element.evaluate(node) + + # See issue antlr#370 - Prevents XPath from returning the + # same node multiple times + matching = filter(lambda m: m not in work_next, matching) + + work_next.extend(matching) + work = work_next + + return work + + +class XPathElement(object): + + def __init__(self, nodeName:str): + self.nodeName = nodeName + self.invert = False + + def __str__(self): + return type(self).__name__ + "[" + ("!" if self.invert else "") + self.nodeName + "]" + + + +# +# Either {@code ID} at start of path or {@code ...//ID} in middle of path. +# +class XPathRuleAnywhereElement(XPathElement): + + def __init__(self, ruleName:str, ruleIndex:int): + super().__init__(ruleName) + self.ruleIndex = ruleIndex + + def evaluate(self, t:ParseTree): + # return all ParserRuleContext descendants of t that match ruleIndex (or do not match if inverted) + return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.descendants(t)) + +class XPathRuleElement(XPathElement): + + def __init__(self, ruleName:str, ruleIndex:int): + super().__init__(ruleName) + self.ruleIndex = ruleIndex + + def evaluate(self, t:ParseTree): + # return all ParserRuleContext children of t that match ruleIndex (or do not match if inverted) + return filter(lambda c: isinstance(c, ParserRuleContext) and (self.invert ^ (c.getRuleIndex() == self.ruleIndex)), Trees.getChildren(t)) + +class XPathTokenAnywhereElement(XPathElement): + + def __init__(self, ruleName:str, tokenType:int): + super().__init__(ruleName) + self.tokenType = tokenType + + def evaluate(self, t:ParseTree): + # return all TerminalNode descendants of t that match tokenType (or do not match if inverted) + return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.descendants(t)) + +class XPathTokenElement(XPathElement): + + def __init__(self, ruleName:str, tokenType:int): + super().__init__(ruleName) + self.tokenType = tokenType + + def evaluate(self, t:ParseTree): + # return all TerminalNode children of t that match tokenType (or do not match if inverted) + return filter(lambda c: isinstance(c, TerminalNode) and (self.invert ^ (c.symbol.type == self.tokenType)), Trees.getChildren(t)) + + +class XPathWildcardAnywhereElement(XPathElement): + + def __init__(self): + super().__init__(XPath.WILDCARD) + + def evaluate(self, t:ParseTree): + if self.invert: + return list() # !* is weird but valid (empty) + else: + return Trees.descendants(t) + + +class XPathWildcardElement(XPathElement): + + def __init__(self): + super().__init__(XPath.WILDCARD) + + + def evaluate(self, t:ParseTree): + if self.invert: + return list() # !* is weird but valid (empty) + else: + return Trees.getChildren(t) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..216c000dc5ffc8e53cc9c596e420c1e67604d1aa --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/__init__.py @@ -0,0 +1 @@ +__author__ = 'ericvergnaud' 
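
For reference, the XPath helper above is normally driven through its static findAll entry point. The following is a minimal usage sketch, not part of the vendored sources; the ExprLexer/ExprParser classes and the prog and expr rule names are hypothetical stand-ins for whatever the antlr4 code generator produced for a concrete grammar.

    from antlr4 import CommonTokenStream, InputStream
    from antlr4.xpath.XPath import XPath
    from ExprLexer import ExprLexer    # hypothetical generated lexer
    from ExprParser import ExprParser  # hypothetical generated parser

    # Parse some input with the (hypothetical) generated parser.
    lexer = ExprLexer(InputStream("1 + 2 * 3"))
    parser = ExprParser(CommonTokenStream(lexer))
    tree = parser.prog()  # hypothetical start rule

    # "//expr" collects every node produced by rule 'expr' anywhere in the
    # tree; "/prog/expr" would match only expr children of the root.
    for node in XPath.findAll(tree, "//expr", parser):
        print(node.getText())
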
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a4b854920b602131c9e30a812d40260478987df7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fc2c2f00ed7144dfeb12897e04f51dd6bf905930578bbe052a0e474b9ee7312 +size 4436376 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52f395c48b08a87a9f940e247d4c235a3194a4d2 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c2eb33aec3f02afd03cdb233ada4c92a78615d25f8c2a174a593b4b2df9df19 +size 115881 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..939c54f288167251c6d6c1a05a07c4d2e07ddc20 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff0f50cb5de938e2ca02df1b4392a8898cba4539 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/cbook/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/cbook/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6d5866e440544033408051d686076f90e8a8e0c Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/cbook/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/cbook/__pycache__/deprecation.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/cbook/__pycache__/deprecation.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edf0722dd7d1903c1e0f07d41865cc83c8f52a92 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/cbook/__pycache__/deprecation.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/Epoch.py 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/Epoch.py new file mode 100644 index 0000000000000000000000000000000000000000..3808663056e7687f742e56d0392486ae287d5a58 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/Epoch.py @@ -0,0 +1,211 @@ +"""Epoch module.""" + +import functools +import operator +import math +import datetime as DT + +from matplotlib import _api +from matplotlib.dates import date2num + + +class Epoch: + # Frame conversion offsets in seconds + # t(TO) = t(FROM) + allowed[ FROM ][ TO ] + allowed = { + "ET": { + "UTC": +64.1839, + }, + "UTC": { + "ET": -64.1839, + }, + } + + def __init__(self, frame, sec=None, jd=None, daynum=None, dt=None): + """ + Create a new Epoch object. + + Build an epoch 1 of 2 ways: + + Using seconds past a Julian date: + # Epoch('ET', sec=1e8, jd=2451545) + + or using a matplotlib day number + # Epoch('ET', daynum=730119.5) + + = ERROR CONDITIONS + - If the input units are not in the allowed list, an error is thrown. + + = INPUT VARIABLES + - frame The frame of the epoch. Must be 'ET' or 'UTC' + - sec The number of seconds past the input JD. + - jd The Julian date of the epoch. + - daynum The matplotlib day number of the epoch. + - dt A python datetime instance. + """ + if ((sec is None and jd is not None) or + (sec is not None and jd is None) or + (daynum is not None and + (sec is not None or jd is not None)) or + (daynum is None and dt is None and + (sec is None or jd is None)) or + (daynum is not None and dt is not None) or + (dt is not None and (sec is not None or jd is not None)) or + ((dt is not None) and not isinstance(dt, DT.datetime))): + raise ValueError( + "Invalid inputs. Must enter sec and jd together, " + "daynum by itself, or dt (must be a python datetime).\n" + "Sec = %s\n" + "JD = %s\n" + "dnum= %s\n" + "dt = %s" % (sec, jd, daynum, dt)) + + _api.check_in_list(self.allowed, frame=frame) + self._frame = frame + + if dt is not None: + daynum = date2num(dt) + + if daynum is not None: + # 1-JAN-0001 in JD = 1721425.5 + jd = float(daynum) + 1721425.5 + self._jd = math.floor(jd) + self._seconds = (jd - self._jd) * 86400.0 + + else: + self._seconds = float(sec) + self._jd = float(jd) + + # Resolve seconds down to [ 0, 86400) + deltaDays = math.floor(self._seconds / 86400) + self._jd += deltaDays + self._seconds -= deltaDays * 86400.0 + + def convert(self, frame): + if self._frame == frame: + return self + + offset = self.allowed[self._frame][frame] + + return Epoch(frame, self._seconds + offset, self._jd) + + def frame(self): + return self._frame + + def julianDate(self, frame): + t = self + if frame != self._frame: + t = self.convert(frame) + + return t._jd + t._seconds / 86400.0 + + def secondsPast(self, frame, jd): + t = self + if frame != self._frame: + t = self.convert(frame) + + delta = t._jd - jd + return t._seconds + delta * 86400 + + def _cmp(self, op, rhs): + """Compare Epochs *self* and *rhs* using operator *op*.""" + t = self + if self._frame != rhs._frame: + t = self.convert(rhs._frame) + if t._jd != rhs._jd: + return op(t._jd, rhs._jd) + return op(t._seconds, rhs._seconds) + + __eq__ = functools.partialmethod(_cmp, operator.eq) + __ne__ = functools.partialmethod(_cmp, operator.ne) + __lt__ = functools.partialmethod(_cmp, operator.lt) + __le__ = functools.partialmethod(_cmp, operator.le) + __gt__ = functools.partialmethod(_cmp, operator.gt) + __ge__ = functools.partialmethod(_cmp, operator.ge) + + def 
__add__(self, rhs): + """ + Add a duration to an Epoch. + + = INPUT VARIABLES + - rhs The Duration to add. + + = RETURN VALUE + - Returns the sum of ourselves and the input Duration. + """ + t = self + if self._frame != rhs.frame(): + t = self.convert(rhs._frame) + + sec = t._seconds + rhs.seconds() + + return Epoch(t._frame, sec, t._jd) + + def __sub__(self, rhs): + """ + Subtract two Epochs or a Duration from an Epoch. + + Valid: + Duration = Epoch - Epoch + Epoch = Epoch - Duration + + = INPUT VARIABLES + - rhs The Epoch or Duration to subtract. + + = RETURN VALUE + - Returns either the duration between two Epochs or a new + Epoch that is the result of subtracting a duration from an epoch. + """ + # Delay-load due to circular dependencies. + import matplotlib.testing.jpl_units as U + + # Handle Epoch - Duration + if isinstance(rhs, U.Duration): + return self + -rhs + + t = self + if self._frame != rhs._frame: + t = self.convert(rhs._frame) + + days = t._jd - rhs._jd + sec = t._seconds - rhs._seconds + + return U.Duration(rhs._frame, days*86400 + sec) + + def __str__(self): + """Print the Epoch.""" + return "%22.15e %s" % (self.julianDate(self._frame), self._frame) + + def __repr__(self): + """Print the Epoch.""" + return str(self) + + @staticmethod + def range(start, stop, step): + """ + Generate a range of Epoch objects. + + Similar to the Python range() method. Returns the range [ + start, stop) at the requested step. Each element will be an + Epoch object. + + = INPUT VARIABLES + - start The starting value of the range. + - stop The stop value of the range. + - step Step to use. + + = RETURN VALUE + - Returns a list containing the requested Epoch values. + """ + elems = [] + + i = 0 + while True: + d = start + i * step + if d >= stop: + break + + elems.append(d) + i += 1 + + return elems diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/StrConverter.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/StrConverter.py new file mode 100644 index 0000000000000000000000000000000000000000..a62d4981dc79201214dc926eaa6a4c74ffcba078 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/StrConverter.py @@ -0,0 +1,97 @@ +"""StrConverter module containing class StrConverter.""" + +import numpy as np + +import matplotlib.units as units + +__all__ = ['StrConverter'] + + +class StrConverter(units.ConversionInterface): + """ + A Matplotlib converter class for string data values. + + Valid units for string are: + - 'indexed' : Values are indexed as they are specified for plotting. + - 'sorted' : Values are sorted alphanumerically. + - 'inverted' : Values are inverted so that the first value is on top.
+ - 'sorted-inverted' : A combination of 'sorted' and 'inverted' + """ + + @staticmethod + def axisinfo(unit, axis): + # docstring inherited + return None + + @staticmethod + def convert(value, unit, axis): + # docstring inherited + + if value == []: + return [] + + # we delay loading to make matplotlib happy + ax = axis.axes + if axis is ax.xaxis: + isXAxis = True + else: + isXAxis = False + + axis.get_major_ticks() + ticks = axis.get_ticklocs() + labels = axis.get_ticklabels() + + labels = [l.get_text() for l in labels if l.get_text()] + + if not labels: + ticks = [] + labels = [] + + if not np.iterable(value): + value = [value] + + newValues = [] + for v in value: + if v not in labels and v not in newValues: + newValues.append(v) + + labels.extend(newValues) + + # DISABLED: This is disabled because matplotlib bar plots do not + # DISABLED: recalculate the unit conversion of the data values + # DISABLED: this is due to design and is not really a bug. + # DISABLED: If this gets changed, then we can activate the following + # DISABLED: block of code. Note that this works for line plots. + # DISABLED if unit: + # DISABLED if unit.find("sorted") > -1: + # DISABLED labels.sort() + # DISABLED if unit.find("inverted") > -1: + # DISABLED labels = labels[::-1] + + # add padding (so they do not appear on the axes themselves) + labels = [''] + labels + [''] + ticks = list(range(len(labels))) + ticks[0] = 0.5 + ticks[-1] = ticks[-1] - 0.5 + + axis.set_ticks(ticks) + axis.set_ticklabels(labels) + # we have to do the following lines to make ax.autoscale_view work + loc = axis.get_major_locator() + loc.set_bounds(ticks[0], ticks[-1]) + + if isXAxis: + ax.set_xlim(ticks[0], ticks[-1]) + else: + ax.set_ylim(ticks[0], ticks[-1]) + + result = [ticks[labels.index(v)] for v in value] + + ax.viewLim.ignore(-1) + return result + + @staticmethod + def default_units(value, axis): + # docstring inherited + # The default behavior for string indexing. + return "indexed" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/UnitDbl.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/UnitDbl.py new file mode 100644 index 0000000000000000000000000000000000000000..b713ab3aba67a3e016d83ad60df931370b11282e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/UnitDbl.py @@ -0,0 +1,180 @@ +"""UnitDbl module.""" + +import functools +import operator + +from matplotlib import _api + + +class UnitDbl: + """Class UnitDbl in development.""" + + # Unit conversion table. Small subset of the full one but enough + # to test the required functions. First field is a scale factor to + # convert the input units to the units of the second field. Only + # units in this table are allowed. + allowed = { + "m": (0.001, "km"), + "km": (1, "km"), + "mile": (1.609344, "km"), + + "rad": (1, "rad"), + "deg": (1.745329251994330e-02, "rad"), + + "sec": (1, "sec"), + "min": (60.0, "sec"), + "hour": (3600, "sec"), + } + + _types = { + "km": "distance", + "rad": "angle", + "sec": "time", + } + + def __init__(self, value, units): + """ + Create a new UnitDbl object. + + Units are internally converted to km, rad, and sec. The only + valid inputs for units are [m, km, mile, rad, deg, sec, min, hour]. + + The field UnitDbl.value will contain the converted value. Use + the convert() method to get a specific type of units back. 
+ + = ERROR CONDITIONS + - If the input units are not in the allowed list, an error is thrown. + + = INPUT VARIABLES + - value The numeric value of the UnitDbl. + - units The string name of the units the value is in. + """ + data = _api.check_getitem(self.allowed, units=units) + self._value = float(value * data[0]) + self._units = data[1] + + def convert(self, units): + """ + Convert the UnitDbl to a specific set of units. + + = ERROR CONDITIONS + - If the input units are not in the allowed list, an error is thrown. + + = INPUT VARIABLES + - units The string name of the units to convert to. + + = RETURN VALUE + - Returns the value of the UnitDbl in the requested units as a floating + point number. + """ + if self._units == units: + return self._value + data = _api.check_getitem(self.allowed, units=units) + if self._units != data[1]: + raise ValueError(f"Error trying to convert to different units.\n" + f" Invalid conversion requested.\n" + f" UnitDbl: {self}\n" + f" Units: {units}\n") + return self._value / data[0] + + def __abs__(self): + """Return the absolute value of this UnitDbl.""" + return UnitDbl(abs(self._value), self._units) + + def __neg__(self): + """Return the negative value of this UnitDbl.""" + return UnitDbl(-self._value, self._units) + + def __bool__(self): + """Return the truth value of a UnitDbl.""" + return bool(self._value) + + def _cmp(self, op, rhs): + """Check that *self* and *rhs* share units; compare them using *op*.""" + self.checkSameUnits(rhs, "compare") + return op(self._value, rhs._value) + + __eq__ = functools.partialmethod(_cmp, operator.eq) + __ne__ = functools.partialmethod(_cmp, operator.ne) + __lt__ = functools.partialmethod(_cmp, operator.lt) + __le__ = functools.partialmethod(_cmp, operator.le) + __gt__ = functools.partialmethod(_cmp, operator.gt) + __ge__ = functools.partialmethod(_cmp, operator.ge) + + def _binop_unit_unit(self, op, rhs): + """Check that *self* and *rhs* share units; combine them using *op*.""" + self.checkSameUnits(rhs, op.__name__) + return UnitDbl(op(self._value, rhs._value), self._units) + + __add__ = functools.partialmethod(_binop_unit_unit, operator.add) + __sub__ = functools.partialmethod(_binop_unit_unit, operator.sub) + + def _binop_unit_scalar(self, op, scalar): + """Combine *self* and *scalar* using *op*.""" + return UnitDbl(op(self._value, scalar), self._units) + + __mul__ = functools.partialmethod(_binop_unit_scalar, operator.mul) + __rmul__ = functools.partialmethod(_binop_unit_scalar, operator.mul) + + def __str__(self): + """Print the UnitDbl.""" + return "%g *%s" % (self._value, self._units) + + def __repr__(self): + """Print the UnitDbl.""" + return "UnitDbl(%g, '%s')" % (self._value, self._units) + + def type(self): + """Return the type of UnitDbl data.""" + return self._types[self._units] + + @staticmethod + def range(start, stop, step=None): + """ + Generate a range of UnitDbl objects. + + Similar to the Python range() method. Returns the range [ + start, stop) at the requested step. Each element will be a + UnitDbl object. + + = INPUT VARIABLES + - start The starting value of the range. + - stop The stop value of the range. + - step Optional step to use. If set to None, then a UnitDbl of + value 1 w/ the units of the start is used. + + = RETURN VALUE + - Returns a list containing the requested UnitDbl values. 
+ """ + if step is None: + step = UnitDbl(1, start._units) + + elems = [] + + i = 0 + while True: + d = start + i * step + if d >= stop: + break + + elems.append(d) + i += 1 + + return elems + + def checkSameUnits(self, rhs, func): + """ + Check to see if units are the same. + + = ERROR CONDITIONS + - If the units of the rhs UnitDbl are not the same as our units, + an error is thrown. + + = INPUT VARIABLES + - rhs The UnitDbl to check for the same units + - func The name of the function doing the check. + """ + if self._units != rhs._units: + raise ValueError(f"Cannot {func} units of different types.\n" + f"LHS: {self._units}\n" + f"RHS: {rhs._units}") diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py new file mode 100644 index 0000000000000000000000000000000000000000..859449a1ee757fddd2724036b9b88b48ade05f08 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/UnitDblConverter.py @@ -0,0 +1,86 @@ +"""UnitDblConverter module containing class UnitDblConverter.""" + +import numpy as np + +from matplotlib import cbook +import matplotlib.units as units +import matplotlib.projections.polar as polar + +__all__ = ['UnitDblConverter'] + + +# A special function for use with the matplotlib FuncFormatter class +# for formatting axes with radian units. +# This was copied from matplotlib example code. +def rad_fn(x, pos=None): + """Radian function formatter.""" + n = int((x / np.pi) * 2.0 + 0.25) + if n == 0: + return str(x) + elif n == 1: + return r'$\pi/2$' + elif n == 2: + return r'$\pi$' + elif n % 2 == 0: + return fr'${n//2}\pi$' + else: + return fr'${n}\pi/2$' + + +class UnitDblConverter(units.ConversionInterface): + """ + Provides Matplotlib conversion functionality for the Monte UnitDbl class. + """ + # default for plotting + defaults = { + "distance": 'km', + "angle": 'deg', + "time": 'sec', + } + + @staticmethod + def axisinfo(unit, axis): + # docstring inherited + + # Delay-load due to circular dependencies. + import matplotlib.testing.jpl_units as U + + # Check to see if the value used for units is a string unit value + # or an actual instance of a UnitDbl so that we can use the unit + # value for the default axis label value. + if unit: + label = unit if isinstance(unit, str) else unit.label() + else: + label = None + + if label == "deg" and isinstance(axis.axes, polar.PolarAxes): + # If we want degrees for a polar plot, use the PolarPlotFormatter + majfmt = polar.PolarAxes.ThetaFormatter() + else: + majfmt = U.UnitDblFormatter(useOffset=False) + + return units.AxisInfo(majfmt=majfmt, label=label) + + @staticmethod + def convert(value, unit, axis): + # docstring inherited + if not cbook.is_scalar_or_string(value): + return [UnitDblConverter.convert(x, unit, axis) for x in value] + # If no units were specified, then get the default units to use. + if unit is None: + unit = UnitDblConverter.default_units(value, axis) + # Convert the incoming UnitDbl value/values to float/floats + if isinstance(axis.axes, polar.PolarAxes) and value.type() == "angle": + # Guarantee that units are radians for polar plots. 
+ return value.convert("rad") + return value.convert(unit) + + @staticmethod + def default_units(value, axis): + # docstring inherited + # Determine the default units based on the user preferences set for + # default units when printing a UnitDbl. + if cbook.is_scalar_or_string(value): + return UnitDblConverter.defaults[value.type()] + else: + return UnitDblConverter.default_units(value[0], axis) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b8caa9a8957a250b78712c25175bec415507e416 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__init__.py @@ -0,0 +1,76 @@ +""" +A sample set of units for use with testing unit conversion +of Matplotlib routines. These are used because they use very strict +enforcement of unitized data which will test the entire spectrum of how +unitized data might be used (it is not always meaningful to convert to +a float without specific units given). + +UnitDbl is essentially a unitized floating point number. It has a +minimal set of supported units (enough for testing purposes). All +of the mathematical operations are provided to fully test any behaviour +that might occur with unitized data. Remember that unitized data has +rules as to how it can be applied to one another (a value of distance +cannot be added to a value of time). Thus we need to guard against any +accidental "default" conversion that will strip away the meaning of the +data and render it neutered. + +Epoch is different from a UnitDbl of time. Time is something that can be +measured, whereas an Epoch is a specific moment in time. Epochs are typically +referenced as an offset from some predetermined epoch. + +A difference of two epochs is a Duration. The distinction between a Duration +and a UnitDbl of time is made because an Epoch can have different frames (or +units). In the case of our test Epoch class the two allowed frames are 'UTC' +and 'ET' (note that these are rough estimates provided for testing purposes +and should not be used in production code where accuracy of time frames is +desired). As such a Duration also has a frame of reference and therefore needs +to be called out as different from a simple measurement of time, since a delta-t +in one frame may not be the same in another.
+""" + +from .Duration import Duration +from .Epoch import Epoch +from .UnitDbl import UnitDbl + +from .StrConverter import StrConverter +from .EpochConverter import EpochConverter +from .UnitDblConverter import UnitDblConverter + +from .UnitDblFormatter import UnitDblFormatter + + +__version__ = "1.0" + +__all__ = [ + 'register', + 'Duration', + 'Epoch', + 'UnitDbl', + 'UnitDblFormatter', + ] + + +def register(): + """Register the unit conversion classes with matplotlib.""" + import matplotlib.units as mplU + + mplU.registry[str] = StrConverter() + mplU.registry[Epoch] = EpochConverter() + mplU.registry[Duration] = EpochConverter() + mplU.registry[UnitDbl] = UnitDblConverter() + + +# Some default unit instances +# Distances +m = UnitDbl(1.0, "m") +km = UnitDbl(1.0, "km") +mile = UnitDbl(1.0, "mile") +# Angles +deg = UnitDbl(1.0, "deg") +rad = UnitDbl(1.0, "rad") +# Time +sec = UnitDbl(1.0, "sec") +min = UnitDbl(1.0, "min") +hr = UnitDbl(1.0, "hour") +day = UnitDbl(24.0, "hour") +sec = UnitDbl(1.0, "sec") diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/Duration.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/Duration.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daf91c8cfd571c1551188ccf5e3db9577f0fa241 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/Duration.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/Epoch.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/Epoch.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4fad34560e0080f8987e0d5173dc20fe7ea8f61 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/Epoch.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/EpochConverter.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/EpochConverter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b1a24f2b39f174962b0386708e9b6ca19a6d370 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/EpochConverter.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/StrConverter.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/StrConverter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf93bc6015fdebacda89a3b971104de1319ddd6 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/StrConverter.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDbl.cpython-38.pyc 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDbl.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72b3b0f39e79867d643a178c2ee350c9490855df Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDbl.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDblConverter.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDblConverter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab372d15a3dee1905098d89d2ede671a081e062a Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDblConverter.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDblFormatter.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDblFormatter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e67382a7994b6c09c23bbeaefbc29a233acc70ee Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/UnitDblFormatter.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0d94636bd182d590fda259c8b6ddc2e4118d0e4 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/testing/jpl_units/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aaa1c1aec5aff87314993d9bffc7448e14d8d2f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-38.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e6d25c6e04f31ab1fdf128fa2361a8900cebc01aa99ebe66d3fc900bdbe0b98 +size 185385 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a83d7e4c9d04b1c16ad9c2c246940a0c45dd698b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/__pycache__/__init__.cpython-38.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5518ea48b87b1c5af238ae2beff843eb4d18e03229044d790394c67588ebf3a +size 101206 diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/__init__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7652796c20a5224c1b4729f989d95bc57c0a44e8 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/__init__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/__main__.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/__main__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f65cc51836f0bb6c533c739c314622ea4b8fb141 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/__main__.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/bdist_wheel.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/bdist_wheel.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..135948b685d776e5644599d0d55e14b52ba10ed2 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/bdist_wheel.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/macosx_libfile.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/macosx_libfile.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af5b2cc67bb17b3d803958c12b5493e9b809cf13 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/macosx_libfile.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/metadata.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/metadata.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78dd45677a68f9ce4a1137094f23744674515003 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/metadata.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/pkginfo.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/pkginfo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32a83374ad8c1f97d4b5ed201bef0e429458d1fd Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/pkginfo.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/util.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/util.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6e127bed126a9fc6519c3a320cc2de4f8b59b0f Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/util.cpython-38.pyc differ diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/wheelfile.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/wheelfile.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2746d26e8395e875ae9dde96af9d65cc9a9cba1f Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/__pycache__/wheelfile.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..95740bfb650514f6b17005b4bc8220858ebdfdf4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/__init__.py @@ -0,0 +1,88 @@ +""" +Wheel command-line utility. +""" + +from __future__ import print_function + +import argparse +import os +import sys + + +def require_pkgresources(name): + try: + import pkg_resources # noqa: F401 + except ImportError: + raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name)) + + +class WheelError(Exception): + pass + + +def unpack_f(args): + from .unpack import unpack + unpack(args.wheelfile, args.dest) + + +def pack_f(args): + from .pack import pack + pack(args.directory, args.dest_dir, args.build_number) + + +def convert_f(args): + from .convert import convert + convert(args.files, args.dest_dir, args.verbose) + + +def version_f(args): + from .. import __version__ + print("wheel %s" % __version__) + + +def parser(): + p = argparse.ArgumentParser() + s = p.add_subparsers(help="commands") + + unpack_parser = s.add_parser('unpack', help='Unpack wheel') + unpack_parser.add_argument('--dest', '-d', help='Destination directory', + default='.') + unpack_parser.add_argument('wheelfile', help='Wheel file') + unpack_parser.set_defaults(func=unpack_f) + + repack_parser = s.add_parser('pack', help='Repack wheel') + repack_parser.add_argument('directory', help='Root directory of the unpacked wheel') + repack_parser.add_argument('--dest-dir', '-d', default=os.path.curdir, + help="Directory to store the wheel (default %(default)s)") + repack_parser.add_argument('--build-number', help="Build tag to use in the wheel name") + repack_parser.set_defaults(func=pack_f) + + convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel') + convert_parser.add_argument('files', nargs='*', help='Files to convert') + convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir, + help="Directory to store wheels (default %(default)s)") + convert_parser.add_argument('--verbose', '-v', action='store_true') + convert_parser.set_defaults(func=convert_f) + + version_parser = s.add_parser('version', help='Print version and exit') + version_parser.set_defaults(func=version_f) + + help_parser = s.add_parser('help', help='Show this help') + help_parser.set_defaults(func=lambda args: p.print_help()) + + return p + + +def main(): + p = parser() + args = p.parse_args() + if not hasattr(args, 'func'): + p.print_help() + else: + try: + args.func(args) + return 0 + except WheelError as e: + print(e, file=sys.stderr) + + return 1 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/convert.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/convert.py new file mode 100644 index 
0000000000000000000000000000000000000000..154f1b1e2a5b84ade4d44c9a5226b7979d21958b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/convert.py @@ -0,0 +1,269 @@ +import os.path +import re +import shutil +import sys +import tempfile +import zipfile +from distutils import dist +from glob import iglob + +from ..bdist_wheel import bdist_wheel +from ..wheelfile import WheelFile +from . import WheelError, require_pkgresources + +egg_info_re = re.compile(r''' + (?P.+?)-(?P.+?) + (-(?Ppy\d\.\d+) + (-(?P.+?))? + )?.egg$''', re.VERBOSE) + + +class _bdist_wheel_tag(bdist_wheel): + # allow the client to override the default generated wheel tag + # The default bdist_wheel implementation uses python and abi tags + # of the running python process. This is not suitable for + # generating/repackaging prebuild binaries. + + full_tag_supplied = False + full_tag = None # None or a (pytag, soabitag, plattag) triple + + def get_tag(self): + if self.full_tag_supplied and self.full_tag is not None: + return self.full_tag + else: + return bdist_wheel.get_tag(self) + + +def egg2wheel(egg_path, dest_dir): + filename = os.path.basename(egg_path) + match = egg_info_re.match(filename) + if not match: + raise WheelError('Invalid egg file name: {}'.format(filename)) + + egg_info = match.groupdict() + dir = tempfile.mkdtemp(suffix="_e2w") + if os.path.isfile(egg_path): + # assume we have a bdist_egg otherwise + with zipfile.ZipFile(egg_path) as egg: + egg.extractall(dir) + else: + # support buildout-style installed eggs directories + for pth in os.listdir(egg_path): + src = os.path.join(egg_path, pth) + if os.path.isfile(src): + shutil.copy2(src, dir) + else: + shutil.copytree(src, os.path.join(dir, pth)) + + pyver = egg_info['pyver'] + if pyver: + pyver = egg_info['pyver'] = pyver.replace('.', '') + + arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_') + + # assume all binary eggs are for CPython + abi = 'cp' + pyver[2:] if arch != 'any' else 'none' + + root_is_purelib = egg_info['arch'] is None + if root_is_purelib: + bw = bdist_wheel(dist.Distribution()) + else: + bw = _bdist_wheel_tag(dist.Distribution()) + + bw.root_is_pure = root_is_purelib + bw.python_tag = pyver + bw.plat_name_supplied = True + bw.plat_name = egg_info['arch'] or 'any' + if not root_is_purelib: + bw.full_tag_supplied = True + bw.full_tag = (pyver, abi, arch) + + dist_info_dir = os.path.join(dir, '{name}-{ver}.dist-info'.format(**egg_info)) + bw.egg2dist(os.path.join(dir, 'EGG-INFO'), dist_info_dir) + bw.write_wheelfile(dist_info_dir, generator='egg2wheel') + wheel_name = '{name}-{ver}-{pyver}-{}-{}.whl'.format(abi, arch, **egg_info) + with WheelFile(os.path.join(dest_dir, wheel_name), 'w') as wf: + wf.write_files(dir) + + shutil.rmtree(dir) + + +def parse_wininst_info(wininfo_name, egginfo_name): + """Extract metadata from filenames. + + Extracts the 4 metadataitems needed (name, version, pyversion, arch) from + the installer filename and the name of the egg-info directory embedded in + the zipfile (if any). + + The egginfo filename has the format:: + + name-ver(-pyver)(-arch).egg-info + + The installer filename has the format:: + + name-ver.arch(-pyver).exe + + Some things to note: + + 1. The installer filename is not definitive. An installer can be renamed + and work perfectly well as an installer. So more reliable data should + be used whenever possible. + 2. 
The egg-info data should be preferred for the name and version, because + these come straight from the distutils metadata, and are mandatory. + 3. The pyver from the egg-info data should be ignored, as it is + constructed from the version of Python used to build the installer, + which is irrelevant - the installer filename is correct here (even to + the point that when it's not there, any version is implied). + 4. The architecture must be taken from the installer filename, as it is + not included in the egg-info data. + 5. Architecture-neutral installers still have an architecture because the + installer format itself (being executable) is architecture-specific. We + should therefore ignore the architecture if the content is pure-python. + """ + + egginfo = None + if egginfo_name: + egginfo = egg_info_re.search(egginfo_name) + if not egginfo: + raise ValueError("Egg info filename %s is not valid" % (egginfo_name,)) + + # Parse the wininst filename + # 1. Distribution name (up to the first '-') + w_name, sep, rest = wininfo_name.partition('-') + if not sep: + raise ValueError("Installer filename %s is not valid" % (wininfo_name,)) + + # Strip '.exe' + rest = rest[:-4] + # 2. Python version (from the last '-', must start with 'py') + rest2, sep, w_pyver = rest.rpartition('-') + if sep and w_pyver.startswith('py'): + rest = rest2 + w_pyver = w_pyver.replace('.', '') + else: + # Not version specific - use py2.py3. While it is possible that + # pure-Python code is not compatible with both Python 2 and 3, there + # is no way of knowing from the wininst format, so we assume the best + # here (the user can always manually rename the wheel to be more + # restrictive if needed). + w_pyver = 'py2.py3' + # 3. Version and architecture + w_ver, sep, w_arch = rest.rpartition('.') + if not sep: + raise ValueError("Installer filename %s is not valid" % (wininfo_name,)) + + if egginfo: + w_name = egginfo.group('name') + w_ver = egginfo.group('ver') + + return {'name': w_name, 'ver': w_ver, 'arch': w_arch, 'pyver': w_pyver} + + +def wininst2wheel(path, dest_dir): + with zipfile.ZipFile(path) as bdw: + # Search for egg-info in the archive + egginfo_name = None + for filename in bdw.namelist(): + if '.egg-info' in filename: + egginfo_name = filename + break + + info = parse_wininst_info(os.path.basename(path), egginfo_name) + + root_is_purelib = True + for zipinfo in bdw.infolist(): + if zipinfo.filename.startswith('PLATLIB'): + root_is_purelib = False + break + if root_is_purelib: + paths = {'purelib': ''} + else: + paths = {'platlib': ''} + + dist_info = "%(name)s-%(ver)s" % info + datadir = "%s.data/" % dist_info + + # rewrite paths to trick ZipFile into extracting an egg + # XXX grab wininst .ini - between .exe, padding, and first zip file. 
+ members = [] + egginfo_name = '' + for zipinfo in bdw.infolist(): + key, basename = zipinfo.filename.split('/', 1) + key = key.lower() + basepath = paths.get(key, None) + if basepath is None: + basepath = datadir + key.lower() + '/' + oldname = zipinfo.filename + newname = basepath + basename + zipinfo.filename = newname + del bdw.NameToInfo[oldname] + bdw.NameToInfo[newname] = zipinfo + # Collect member names, but omit '' (from an entry like "PLATLIB/" + if newname: + members.append(newname) + # Remember egg-info name for the egg2dist call below + if not egginfo_name: + if newname.endswith('.egg-info'): + egginfo_name = newname + elif '.egg-info/' in newname: + egginfo_name, sep, _ = newname.rpartition('/') + dir = tempfile.mkdtemp(suffix="_b2w") + bdw.extractall(dir, members) + + # egg2wheel + abi = 'none' + pyver = info['pyver'] + arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_') + # Wininst installers always have arch even if they are not + # architecture-specific (because the format itself is). + # So, assume the content is architecture-neutral if root is purelib. + if root_is_purelib: + arch = 'any' + # If the installer is architecture-specific, it's almost certainly also + # CPython-specific. + if arch != 'any': + pyver = pyver.replace('py', 'cp') + wheel_name = '-'.join((dist_info, pyver, abi, arch)) + if root_is_purelib: + bw = bdist_wheel(dist.Distribution()) + else: + bw = _bdist_wheel_tag(dist.Distribution()) + + bw.root_is_pure = root_is_purelib + bw.python_tag = pyver + bw.plat_name_supplied = True + bw.plat_name = info['arch'] or 'any' + + if not root_is_purelib: + bw.full_tag_supplied = True + bw.full_tag = (pyver, abi, arch) + + dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info) + bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir) + bw.write_wheelfile(dist_info_dir, generator='wininst2wheel') + + wheel_path = os.path.join(dest_dir, wheel_name) + with WheelFile(wheel_path, 'w') as wf: + wf.write_files(dir) + + shutil.rmtree(dir) + + +def convert(files, dest_dir, verbose): + # Only support wheel convert if pkg_resources is present + require_pkgresources('wheel convert') + + for pat in files: + for installer in iglob(pat): + if os.path.splitext(installer)[1] == '.egg': + conv = egg2wheel + else: + conv = wininst2wheel + + if verbose: + print("{}... ".format(installer)) + sys.stdout.flush() + + conv(installer, dest_dir) + if verbose: + print("OK") diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/pack.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/pack.py new file mode 100644 index 0000000000000000000000000000000000000000..1e77fdbd2ccfb17ce1ca65f1834ac1dc927f3047 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/pack.py @@ -0,0 +1,79 @@ +from __future__ import print_function + +import os.path +import re +import sys + +from wheel.cli import WheelError +from wheel.wheelfile import WheelFile + +DIST_INFO_RE = re.compile(r"^(?P(?P.+?)-(?P\d.*?))\.dist-info$") +BUILD_NUM_RE = re.compile(br'Build: (\d\w*)$') + + +def pack(directory, dest_dir, build_number): + """Repack a previously unpacked wheel directory into a new wheel file. + + The .dist-info/WHEEL file must contain one or more tags so that the target + wheel file name can be determined. 
+
+    :param directory: The unpacked wheel directory
+    :param dest_dir: Destination directory (defaults to the current directory)
+    :param build_number: Optional build number to use in the new wheel
+        filename (replaces any Build number already present in
+        .dist-info/WHEEL); pass an empty string to drop the Build tag
+    """
+    # Find the .dist-info directory
+    dist_info_dirs = [fn for fn in os.listdir(directory)
+                      if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)]
+    if len(dist_info_dirs) > 1:
+        raise WheelError('Multiple .dist-info directories found in {}'.format(directory))
+    elif not dist_info_dirs:
+        raise WheelError('No .dist-info directories found in {}'.format(directory))
+
+    # Determine the target wheel filename
+    dist_info_dir = dist_info_dirs[0]
+    name_version = DIST_INFO_RE.match(dist_info_dir).group('namever')
+
+    # Read the tags and the existing build number from .dist-info/WHEEL
+    existing_build_number = None
+    wheel_file_path = os.path.join(directory, dist_info_dir, 'WHEEL')
+    with open(wheel_file_path) as f:
+        tags = []
+        for line in f:
+            if line.startswith('Tag: '):
+                tags.append(line.split(' ')[1].rstrip())
+            elif line.startswith('Build: '):
+                existing_build_number = line.split(' ')[1].rstrip()
+
+    if not tags:
+        raise WheelError('No tags present in {}/WHEEL; cannot determine target wheel filename'
+                         .format(dist_info_dir))
+
+    # Set the wheel file name and add/replace/remove the Build tag in .dist-info/WHEEL
+    build_number = build_number if build_number is not None else existing_build_number
+    if build_number is not None:
+        if build_number:
+            name_version += '-' + build_number
+
+        if build_number != existing_build_number:
+            replacement = ('Build: %s\r\n' % build_number).encode('ascii') if build_number else b''
+            with open(wheel_file_path, 'rb+') as f:
+                wheel_file_content = f.read()
+                wheel_file_content, num_replaced = BUILD_NUM_RE.subn(replacement,
+                                                                     wheel_file_content)
+                if not num_replaced:
+                    wheel_file_content += replacement
+
+                f.seek(0)
+                f.truncate()
+                f.write(wheel_file_content)
+
+    # Reassemble the tags for the wheel file
+    impls = sorted({tag.split('-')[0] for tag in tags})
+    abivers = sorted({tag.split('-')[1] for tag in tags})
+    platforms = sorted({tag.split('-')[2] for tag in tags})
+    tagline = '-'.join(['.'.join(impls), '.'.join(abivers), '.'.join(platforms)])
+
+    # Repack the wheel
+    wheel_path = os.path.join(dest_dir, '{}-{}.whl'.format(name_version, tagline))
+    with WheelFile(wheel_path, 'w') as wf:
+        print("Repacking wheel as {}...".format(wheel_path), end='')
+        sys.stdout.flush()
+        wf.write_files(directory)
+
+    print('OK')
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/unpack.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/unpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e9857a35088ebec8d0a51c4be09caa66320435e
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/cli/unpack.py
@@ -0,0 +1,25 @@
+from __future__ import print_function
+
+import os.path
+import sys
+
+from ..wheelfile import WheelFile
+
+
+def unpack(path, dest='.'):
+    """Unpack a wheel.
+
+    Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
+    is the package name and {ver} its version.
+
+    :param path: The path to the wheel.
+    :param dest: Destination directory (defaults to the current directory).
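+
+    For example (hypothetical filename), unpacking ``foo-1.0-py3-none-any.whl``
+    with the default ``dest`` extracts the archive into ``./foo-1.0``.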
+ """ + with WheelFile(path) as wf: + namever = wf.parsed_filename.group('namever') + destination = os.path.join(dest, namever) + print("Unpacking to: {}...".format(destination), end='') + sys.stdout.flush() + wf.extractall(destination) + + print('OK') diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/vendored/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wheel/vendored/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391