diff --git a/.gitattributes b/.gitattributes index 308365b51190ee3605b046cae6ff4493aadf2083..336a200d8b6e9bd02a1c27eb7ce4ec3cd158ee78 100644 --- a/.gitattributes +++ b/.gitattributes @@ -370,3 +370,7 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor my_container_sandbox/workspace/anaconda3/lib/libnppist.so.11.3.3.95 filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/pyparsing/__pycache__/core.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/__pycache__/__init__.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5931604af71ab88257f2658bc8e5f30ede5c4b6a Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc differ diff --git 
a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..124cc97838ae2ed18a999f1709422aa7d23533fc Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..340a9ab76604f50ef5b44ec58db7a75c6b7fc583 Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89c9e2270c1e5b5aafb94125a47689dd673ac7cb Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe15dadc610a15c9556745a6150ab67020ac1497 Binary files /dev/null and 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53ea249b76659382ced3c7531083c0450a0934fa Binary files /dev/null and b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc differ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1abe0a4a7faacde5140d5631dfe48a79325cd4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py @@ -0,0 +1,132 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ +from antlr4.IntervalSet import IntervalSet + +from antlr4.RuleContext import RuleContext + +from antlr4.Token import Token +from antlr4.atn.ATNType import ATNType +from antlr4.atn.ATNState import ATNState, DecisionState + + +class ATN(object): + __slots__ = ( + 'grammarType', 'maxTokenType', 'states', 'decisionToState', + 'ruleToStartState', 'ruleToStopState', 'modeNameToStartState', + 'ruleToTokenType', 'lexerActions', 'modeToStartState' + ) + + INVALID_ALT_NUMBER = 0 + + # Used for runtime deserialization of ATNs from strings#/ + def __init__(self, grammarType:ATNType , maxTokenType:int ): + # The type of the ATN. 
+ self.grammarType = grammarType + # The maximum value for any symbol recognized by a transition in the ATN. + self.maxTokenType = maxTokenType + self.states = [] + # Each subrule/rule is a decision point and we must track them so we + # can go back later and build DFA predictors for them. This includes + # all the rules, subrules, optional blocks, ()+, ()* etc... + self.decisionToState = [] + # Maps from rule index to starting state number. + self.ruleToStartState = [] + # Maps from rule index to stop state number. + self.ruleToStopState = None + self.modeNameToStartState = dict() + # For lexer ATNs, this maps the rule index to the resulting token type. + # For parser ATNs, this maps the rule index to the generated bypass token + # type if the + # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions} + # deserialization option was specified; otherwise, this is {@code null}. + self.ruleToTokenType = None + # For lexer ATNs, this is an array of {@link LexerAction} objects which may + # be referenced by action transitions in the ATN. + self.lexerActions = None + self.modeToStartState = [] + + # Compute the set of valid tokens that can occur starting in state {@code s}. + # If {@code ctx} is null, the set of tokens will not include what can follow + # the rule surrounding {@code s}. In other words, the set will be + # restricted to tokens reachable staying within {@code s}'s rule. + def nextTokensInContext(self, s:ATNState, ctx:RuleContext): + from antlr4.LL1Analyzer import LL1Analyzer + anal = LL1Analyzer(self) + return anal.LOOK(s, ctx=ctx) + + # Compute the set of valid tokens that can occur starting in {@code s} and + # staying in same rule. {@link Token#EPSILON} is in set if we reach end of + # rule. 
+ def nextTokensNoContext(self, s:ATNState): + if s.nextTokenWithinRule is not None: + return s.nextTokenWithinRule + s.nextTokenWithinRule = self.nextTokensInContext(s, None) + s.nextTokenWithinRule.readonly = True + return s.nextTokenWithinRule + + def nextTokens(self, s:ATNState, ctx:RuleContext = None): + if ctx==None: + return self.nextTokensNoContext(s) + else: + return self.nextTokensInContext(s, ctx) + + def addState(self, state:ATNState): + if state is not None: + state.atn = self + state.stateNumber = len(self.states) + self.states.append(state) + + def removeState(self, state:ATNState): + self.states[state.stateNumber] = None # just free mem, don't shift states in list + + def defineDecisionState(self, s:DecisionState): + self.decisionToState.append(s) + s.decision = len(self.decisionToState)-1 + return s.decision + + def getDecisionState(self, decision:int): + if len(self.decisionToState)==0: + return None + else: + return self.decisionToState[decision] + + # Computes the set of input symbols which could follow ATN state number + # {@code stateNumber} in the specified full {@code context}. This method + # considers the complete parser context, but does not evaluate semantic + # predicates (i.e. all predicates encountered during the calculation are + # assumed true). If a path in the ATN exists from the starting state to the + # {@link RuleStopState} of the outermost context without matching any + # symbols, {@link Token#EOF} is added to the returned set. + # + #
If {@code context} is {@code null}, it is treated as + # {@link ParserRuleContext#EMPTY}.
+ # + # @param stateNumber the ATN state number + # @param context the full parse context + # @return The set of potentially valid input symbols which could follow the + # specified state in the specified context. + # @throws IllegalArgumentException if the ATN does not contain a state with + # number {@code stateNumber} + #/ + def getExpectedTokens(self, stateNumber:int, ctx:RuleContext ): + if stateNumber < 0 or stateNumber >= len(self.states): + raise Exception("Invalid state number.") + s = self.states[stateNumber] + following = self.nextTokens(s) + if Token.EPSILON not in following: + return following + expected = IntervalSet() + expected.addSet(following) + expected.removeOne(Token.EPSILON) + while (ctx != None and ctx.invokingState >= 0 and Token.EPSILON in following): + invokingState = self.states[ctx.invokingState] + rt = invokingState.transitions[0] + following = self.nextTokens(rt.followState) + expected.addSet(following) + expected.removeOne(Token.EPSILON) + ctx = ctx.parentCtx + if Token.EPSILON in following: + expected.addOne(Token.EOF) + return expected diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py new file mode 100644 index 0000000000000000000000000000000000000000..e008fb2efac45a24b6d9566170bbd227cee252b6 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py @@ -0,0 +1,159 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + +# A tuple: (ATN state, predicted alt, syntactic, semantic context). +# The syntactic context is a graph-structured stack node whose +# path(s) to the root is the rule invocation(s) +# chain used to arrive at the state. 
The semantic context is +# the tree of semantic predicates encountered before reaching +# an ATN state. +#/ +from io import StringIO +from antlr4.PredictionContext import PredictionContext +from antlr4.atn.ATNState import ATNState, DecisionState +from antlr4.atn.LexerActionExecutor import LexerActionExecutor +from antlr4.atn.SemanticContext import SemanticContext + +# need a forward declaration +ATNConfig = None + +class ATNConfig(object): + __slots__ = ( + 'state', 'alt', 'context', 'semanticContext', 'reachesIntoOuterContext', + 'precedenceFilterSuppressed' + ) + + def __init__(self, state:ATNState=None, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=None, config:ATNConfig=None): + if config is not None: + if state is None: + state = config.state + if alt is None: + alt = config.alt + if context is None: + context = config.context + if semantic is None: + semantic = config.semanticContext + if semantic is None: + semantic = SemanticContext.NONE + # The ATN state associated with this configuration#/ + self.state = state + # What alt (or lexer rule) is predicted by this configuration#/ + self.alt = alt + # The stack of invoking states leading to the rule/states associated + # with this config. We track only those contexts pushed during + # execution of the ATN simulator. + self.context = context + self.semanticContext = semantic + # We cannot execute predicates dependent upon local context unless + # we know for sure we are in the correct context. Because there is + # no way to do this efficiently, we simply cannot evaluate + # dependent predicates unless we are in the rule that initially + # invokes the ATN simulator. + # + # closure() tracks the depth of how far we dip into the + # outer context: depth > 0. Note that it may not be totally + # accurate depth since I don't ever decrement. 
TODO: make it a boolean then + self.reachesIntoOuterContext = 0 if config is None else config.reachesIntoOuterContext + self.precedenceFilterSuppressed = False if config is None else config.precedenceFilterSuppressed + + # An ATN configuration is equal to another if both have + # the same state, they predict the same alternative, and + # syntactic/semantic contexts are the same. + #/ + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, ATNConfig): + return False + else: + return self.state.stateNumber==other.state.stateNumber \ + and self.alt==other.alt \ + and ((self.context is other.context) or (self.context==other.context)) \ + and self.semanticContext==other.semanticContext \ + and self.precedenceFilterSuppressed==other.precedenceFilterSuppressed + + def __hash__(self): + return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext)) + + def hashCodeForConfigSet(self): + return hash((self.state.stateNumber, self.alt, hash(self.semanticContext))) + + def equalsForConfigSet(self, other): + if self is other: + return True + elif not isinstance(other, ATNConfig): + return False + else: + return self.state.stateNumber==other.state.stateNumber \ + and self.alt==other.alt \ + and self.semanticContext==other.semanticContext + + def __str__(self): + with StringIO() as buf: + buf.write('(') + buf.write(str(self.state)) + buf.write(",") + buf.write(str(self.alt)) + if self.context is not None: + buf.write(",[") + buf.write(str(self.context)) + buf.write("]") + if self.semanticContext is not None and self.semanticContext is not SemanticContext.NONE: + buf.write(",") + buf.write(str(self.semanticContext)) + if self.reachesIntoOuterContext>0: + buf.write(",up=") + buf.write(str(self.reachesIntoOuterContext)) + buf.write(')') + return buf.getvalue() + +# need a forward declaration +LexerATNConfig = None + +class LexerATNConfig(ATNConfig): + __slots__ = ('lexerActionExecutor', 'passedThroughNonGreedyDecision') + + def 
__init__(self, state:ATNState, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=SemanticContext.NONE, + lexerActionExecutor:LexerActionExecutor=None, config:LexerATNConfig=None): + super().__init__(state=state, alt=alt, context=context, semantic=semantic, config=config) + if config is not None: + if lexerActionExecutor is None: + lexerActionExecutor = config.lexerActionExecutor + # This is the backing field for {@link #getLexerActionExecutor}. + self.lexerActionExecutor = lexerActionExecutor + self.passedThroughNonGreedyDecision = False if config is None else self.checkNonGreedyDecision(config, state) + + def __hash__(self): + return hash((self.state.stateNumber, self.alt, self.context, + self.semanticContext, self.passedThroughNonGreedyDecision, + self.lexerActionExecutor)) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, LexerATNConfig): + return False + if self.passedThroughNonGreedyDecision != other.passedThroughNonGreedyDecision: + return False + if not(self.lexerActionExecutor == other.lexerActionExecutor): + return False + return super().__eq__(other) + + + + def hashCodeForConfigSet(self): + return hash(self) + + + + def equalsForConfigSet(self, other): + return self==other + + + + def checkNonGreedyDecision(self, source:LexerATNConfig, target:ATNState): + return source.passedThroughNonGreedyDecision \ + or isinstance(target, DecisionState) and target.nonGreedy diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9a512a682431e46a3f5b848420a762af3c7914 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py @@ -0,0 +1,212 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. 
+# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. + +# +# Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track +# info about the set, with support for combining similar configurations using a +# graph-structured stack. +#/ +from io import StringIO +from functools import reduce +from antlr4.PredictionContext import PredictionContext, merge +from antlr4.Utils import str_list +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import ATNConfig +from antlr4.atn.SemanticContext import SemanticContext +from antlr4.error.Errors import UnsupportedOperationException, IllegalStateException + +ATNSimulator = None + +class ATNConfigSet(object): + __slots__ = ( + 'configLookup', 'fullCtx', 'readonly', 'configs', 'uniqueAlt', + 'conflictingAlts', 'hasSemanticContext', 'dipsIntoOuterContext', + 'cachedHashCode' + ) + + # + # The reason that we need this is because we don't want the hash map to use + # the standard hash code and equals. We need all configurations with the same + # {@code (s,i,_,semctx)} to be equal. Unfortunately, this key effectively doubles + # the number of objects associated with ATNConfigs. The other solution is to + # use a hash table that lets us specify the equals/hashcode operation. + + def __init__(self, fullCtx:bool=True): + # All configs but hashed by (s, i, _, pi) not including context. Wiped out + # when we go readonly as this set becomes a DFA state. + self.configLookup = dict() + # Indicates that this configuration set is part of a full context + # LL prediction. It will be used to determine how to merge $. With SLL + # it's a wildcard whereas it is not for LL context merge. + self.fullCtx = fullCtx + # Indicates that the set of configurations is read-only. Do not + # allow any code to manipulate the set; DFA states will point at + # the sets and they must not change. 
This does not protect the other + # fields; in particular, conflictingAlts is set after + # we've made this readonly. + self.readonly = False + # Track the elements as they are added to the set; supports get(i)#/ + self.configs = [] + + # TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation + # TODO: can we track conflicts as they are added to save scanning configs later? + self.uniqueAlt = 0 + self.conflictingAlts = None + + # Used in parser and lexer. In lexer, it indicates we hit a pred + # while computing a closure operation. Don't make a DFA state from this. + self.hasSemanticContext = False + self.dipsIntoOuterContext = False + + self.cachedHashCode = -1 + + def __iter__(self): + return self.configs.__iter__() + + # Adding a new config means merging contexts with existing configs for + # {@code (s, i, pi, _)}, where {@code s} is the + # {@link ATNConfig#state}, {@code i} is the {@link ATNConfig#alt}, and + # {@code pi} is the {@link ATNConfig#semanticContext}. We use + # {@code (s,i,pi)} as key. + # + #This method updates {@link #dipsIntoOuterContext} and + # {@link #hasSemanticContext} when necessary.
+ #/ + def add(self, config:ATNConfig, mergeCache=None): + if self.readonly: + raise Exception("This set is readonly") + if config.semanticContext is not SemanticContext.NONE: + self.hasSemanticContext = True + if config.reachesIntoOuterContext > 0: + self.dipsIntoOuterContext = True + existing = self.getOrAdd(config) + if existing is config: + self.cachedHashCode = -1 + self.configs.append(config) # track order here + return True + # a previous (s,i,pi,_), merge with it and save result + rootIsWildcard = not self.fullCtx + merged = merge(existing.context, config.context, rootIsWildcard, mergeCache) + # no need to check for existing.context, config.context in cache + # since only way to create new graphs is "call rule" and here. + # We cache at both places. + existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext) + # make sure to preserve the precedence filter suppression during the merge + if config.precedenceFilterSuppressed: + existing.precedenceFilterSuppressed = True + existing.context = merged # replace context; no need to alt mapping + return True + + def getOrAdd(self, config:ATNConfig): + h = config.hashCodeForConfigSet() + l = self.configLookup.get(h, None) + if l is not None: + r = next((cfg for cfg in l if config.equalsForConfigSet(cfg)), None) + if r is not None: + return r + if l is None: + l = [config] + self.configLookup[h] = l + else: + l.append(config) + return config + + def getStates(self): + return set(c.state for c in self.configs) + + def getPredicates(self): + return list(cfg.semanticContext for cfg in self.configs if cfg.semanticContext!=SemanticContext.NONE) + + def get(self, i:int): + return self.configs[i] + + def optimizeConfigs(self, interpreter:ATNSimulator): + if self.readonly: + raise IllegalStateException("This set is readonly") + if len(self.configs)==0: + return + for config in self.configs: + config.context = interpreter.getCachedContext(config.context) + + def addAll(self, 
coll:list): + for c in coll: + self.add(c) + return False + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, ATNConfigSet): + return False + + same = self.configs is not None and \ + self.configs==other.configs and \ + self.fullCtx == other.fullCtx and \ + self.uniqueAlt == other.uniqueAlt and \ + self.conflictingAlts == other.conflictingAlts and \ + self.hasSemanticContext == other.hasSemanticContext and \ + self.dipsIntoOuterContext == other.dipsIntoOuterContext + + return same + + def __hash__(self): + if self.readonly: + if self.cachedHashCode == -1: + self.cachedHashCode = self.hashConfigs() + return self.cachedHashCode + return self.hashConfigs() + + def hashConfigs(self): + return reduce(lambda h, cfg: hash((h, cfg)), self.configs, 0) + + def __len__(self): + return len(self.configs) + + def isEmpty(self): + return len(self.configs)==0 + + def __contains__(self, config): + if self.configLookup is None: + raise UnsupportedOperationException("This method is not implemented for readonly sets.") + h = config.hashCodeForConfigSet() + l = self.configLookup.get(h, None) + if l is not None: + for c in l: + if config.equalsForConfigSet(c): + return True + return False + + def clear(self): + if self.readonly: + raise IllegalStateException("This set is readonly") + self.configs.clear() + self.cachedHashCode = -1 + self.configLookup.clear() + + def setReadonly(self, readonly:bool): + self.readonly = readonly + self.configLookup = None # can't mod, no need for lookup cache + + def __str__(self): + with StringIO() as buf: + buf.write(str_list(self.configs)) + if self.hasSemanticContext: + buf.write(",hasSemanticContext=") + buf.write(str(self.hasSemanticContext)) + if self.uniqueAlt!=ATN.INVALID_ALT_NUMBER: + buf.write(",uniqueAlt=") + buf.write(str(self.uniqueAlt)) + if self.conflictingAlts is not None: + buf.write(",conflictingAlts=") + buf.write(str(self.conflictingAlts)) + if self.dipsIntoOuterContext: + 
buf.write(",dipsIntoOuterContext") + return buf.getvalue() + + +class OrderedATNConfigSet(ATNConfigSet): + + def __init__(self): + super().__init__() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..69d5437f35437874a60bcc76118f018edbf65fed --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py @@ -0,0 +1,24 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. + +# need a forward declaration +ATNDeserializationOptions = None + +class ATNDeserializationOptions(object): + __slots__ = ('readonly', 'verifyATN', 'generateRuleBypassTransitions') + + defaultOptions = None + + def __init__(self, copyFrom:ATNDeserializationOptions = None): + self.readonly = False + self.verifyATN = True if copyFrom is None else copyFrom.verifyATN + self.generateRuleBypassTransitions = False if copyFrom is None else copyFrom.generateRuleBypassTransitions + + def __setattr__(self, key, value): + if key!="readonly" and self.readonly: + raise Exception("The object is read only.") + super(type(self), self).__setattr__(key,value) + +ATNDeserializationOptions.defaultOptions = ATNDeserializationOptions() +ATNDeserializationOptions.defaultOptions.readonly = True diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py new file mode 100644 index 0000000000000000000000000000000000000000..cc100d05a43e10b91390dd47a7b05f2eff6fe23f --- /dev/null +++ 
b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py @@ -0,0 +1,529 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ +from uuid import UUID +from io import StringIO +from typing import Callable +from antlr4.Token import Token +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNType import ATNType +from antlr4.atn.ATNState import * +from antlr4.atn.Transition import * +from antlr4.atn.LexerAction import * +from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions + +# This is the earliest supported serialized UUID. +BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E") + +# This UUID indicates the serialized ATN contains two sets of +# IntervalSets, where the second set's values are encoded as +# 32-bit integers to support the full Unicode SMP range up to U+10FFFF. +ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089") + +# This list contains all of the currently supported UUIDs, ordered by when +# the feature first appeared in this branch. +SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ] + +SERIALIZED_VERSION = 3 + +# This is the current serialized UUID. +SERIALIZED_UUID = ADDED_UNICODE_SMP + +class ATNDeserializer (object): + __slots__ = ('deserializationOptions', 'data', 'pos', 'uuid') + + def __init__(self, options : ATNDeserializationOptions = None): + if options is None: + options = ATNDeserializationOptions.defaultOptions + self.deserializationOptions = options + + # Determines if a particular serialized representation of an ATN supports + # a particular feature, identified by the {@link UUID} used for serializing + # the ATN at the time the feature was first introduced. + # + # @param feature The {@link UUID} marking the first time the feature was + # supported in the serialized ATN. 
+ # @param actualUuid The {@link UUID} of the actual serialized ATN which is + # currently being deserialized. + # @return {@code true} if the {@code actualUuid} value represents a + # serialized ATN at or after the feature identified by {@code feature} was + # introduced; otherwise, {@code false}. + + def isFeatureSupported(self, feature : UUID , actualUuid : UUID ): + idx1 = SUPPORTED_UUIDS.index(feature) + if idx1<0: + return False + idx2 = SUPPORTED_UUIDS.index(actualUuid) + return idx2 >= idx1 + + def deserialize(self, data : str): + self.reset(data) + self.checkVersion() + self.checkUUID() + atn = self.readATN() + self.readStates(atn) + self.readRules(atn) + self.readModes(atn) + sets = [] + # First, read all sets with 16-bit Unicode code points <= U+FFFF. + self.readSets(atn, sets, self.readInt) + # Next, if the ATN was serialized with the Unicode SMP feature, + # deserialize sets with 32-bit arguments <= U+10FFFF. + if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid): + self.readSets(atn, sets, self.readInt32) + self.readEdges(atn, sets) + self.readDecisions(atn) + self.readLexerActions(atn) + self.markPrecedenceDecisions(atn) + self.verifyATN(atn) + if self.deserializationOptions.generateRuleBypassTransitions \ + and atn.grammarType == ATNType.PARSER: + self.generateRuleBypassTransitions(atn) + # re-verify after modification + self.verifyATN(atn) + return atn + + def reset(self, data:str): + def adjust(c): + v = ord(c) + return v-2 if v>1 else v + 65533 + temp = [ adjust(c) for c in data ] + # don't adjust the first value since that's the version number + temp[0] = ord(data[0]) + self.data = temp + self.pos = 0 + + def checkVersion(self): + version = self.readInt() + if version != SERIALIZED_VERSION: + raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").") + + def checkUUID(self): + uuid = self.readUUID() + if not uuid in SUPPORTED_UUIDS: + raise Exception("Could not deserialize ATN 
with UUID: " + str(uuid) + \ + " (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID) + self.uuid = uuid + + def readATN(self): + idx = self.readInt() + grammarType = ATNType.fromOrdinal(idx) + maxTokenType = self.readInt() + return ATN(grammarType, maxTokenType) + + def readStates(self, atn:ATN): + loopBackStateNumbers = [] + endStateNumbers = [] + nstates = self.readInt() + for i in range(0, nstates): + stype = self.readInt() + # ignore bad type of states + if stype==ATNState.INVALID_TYPE: + atn.addState(None) + continue + ruleIndex = self.readInt() + if ruleIndex == 0xFFFF: + ruleIndex = -1 + + s = self.stateFactory(stype, ruleIndex) + if stype == ATNState.LOOP_END: # special case + loopBackStateNumber = self.readInt() + loopBackStateNumbers.append((s, loopBackStateNumber)) + elif isinstance(s, BlockStartState): + endStateNumber = self.readInt() + endStateNumbers.append((s, endStateNumber)) + + atn.addState(s) + + # delay the assignment of loop back and end states until we know all the state instances have been initialized + for pair in loopBackStateNumbers: + pair[0].loopBackState = atn.states[pair[1]] + + for pair in endStateNumbers: + pair[0].endState = atn.states[pair[1]] + + numNonGreedyStates = self.readInt() + for i in range(0, numNonGreedyStates): + stateNumber = self.readInt() + atn.states[stateNumber].nonGreedy = True + + numPrecedenceStates = self.readInt() + for i in range(0, numPrecedenceStates): + stateNumber = self.readInt() + atn.states[stateNumber].isPrecedenceRule = True + + def readRules(self, atn:ATN): + nrules = self.readInt() + if atn.grammarType == ATNType.LEXER: + atn.ruleToTokenType = [0] * nrules + + atn.ruleToStartState = [0] * nrules + for i in range(0, nrules): + s = self.readInt() + startState = atn.states[s] + atn.ruleToStartState[i] = startState + if atn.grammarType == ATNType.LEXER: + tokenType = self.readInt() + if tokenType == 0xFFFF: + tokenType = Token.EOF + + atn.ruleToTokenType[i] = tokenType + 
+ atn.ruleToStopState = [0] * nrules + for state in atn.states: + if not isinstance(state, RuleStopState): + continue + atn.ruleToStopState[state.ruleIndex] = state + atn.ruleToStartState[state.ruleIndex].stopState = state + + def readModes(self, atn:ATN): + nmodes = self.readInt() + for i in range(0, nmodes): + s = self.readInt() + atn.modeToStartState.append(atn.states[s]) + + def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]): + m = self.readInt() + for i in range(0, m): + iset = IntervalSet() + sets.append(iset) + n = self.readInt() + containsEof = self.readInt() + if containsEof!=0: + iset.addOne(-1) + for j in range(0, n): + i1 = readUnicode() + i2 = readUnicode() + iset.addRange(range(i1, i2 + 1)) # range upper limit is exclusive + + def readEdges(self, atn:ATN, sets:list): + nedges = self.readInt() + for i in range(0, nedges): + src = self.readInt() + trg = self.readInt() + ttype = self.readInt() + arg1 = self.readInt() + arg2 = self.readInt() + arg3 = self.readInt() + trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + srcState.addTransition(trans) + + # edges for rule stop states can be derived, so they aren't serialized + for state in atn.states: + for i in range(0, len(state.transitions)): + t = state.transitions[i] + if not isinstance(t, RuleTransition): + continue + outermostPrecedenceReturn = -1 + if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule: + if t.precedence == 0: + outermostPrecedenceReturn = t.target.ruleIndex + trans = EpsilonTransition(t.followState, outermostPrecedenceReturn) + atn.ruleToStopState[t.target.ruleIndex].addTransition(trans) + + for state in atn.states: + if isinstance(state, BlockStartState): + # we need to know the end state to set its start state + if state.endState is None: + raise Exception("IllegalState") + # block end states can only be associated to a single block start state + if state.endState.startState is not None: + raise 
Exception("IllegalState") + state.endState.startState = state + + if isinstance(state, PlusLoopbackState): + for i in range(0, len(state.transitions)): + target = state.transitions[i].target + if isinstance(target, PlusBlockStartState): + target.loopBackState = state + elif isinstance(state, StarLoopbackState): + for i in range(0, len(state.transitions)): + target = state.transitions[i].target + if isinstance(target, StarLoopEntryState): + target.loopBackState = state + + def readDecisions(self, atn:ATN): + ndecisions = self.readInt() + for i in range(0, ndecisions): + s = self.readInt() + decState = atn.states[s] + atn.decisionToState.append(decState) + decState.decision = i + + def readLexerActions(self, atn:ATN): + if atn.grammarType == ATNType.LEXER: + count = self.readInt() + atn.lexerActions = [ None ] * count + for i in range(0, count): + actionType = self.readInt() + data1 = self.readInt() + if data1 == 0xFFFF: + data1 = -1 + data2 = self.readInt() + if data2 == 0xFFFF: + data2 = -1 + lexerAction = self.lexerActionFactory(actionType, data1, data2) + atn.lexerActions[i] = lexerAction + + def generateRuleBypassTransitions(self, atn:ATN): + + count = len(atn.ruleToStartState) + atn.ruleToTokenType = [ 0 ] * count + for i in range(0, count): + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + + for i in range(0, count): + self.generateRuleBypassTransition(atn, i) + + def generateRuleBypassTransition(self, atn:ATN, idx:int): + + bypassStart = BasicBlockStartState() + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop = BlockEndState() + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + atn.defineDecisionState(bypassStart) + + bypassStop.startState = bypassStart + + excludeTransition = None + + if atn.ruleToStartState[idx].isPrecedenceRule: + # wrap from the beginning of the rule to the StarLoopEntryState + endState = None + for state in atn.states: + if self.stateIsEndStateFor(state, idx): + 
endState = state + excludeTransition = state.loopBackState.transitions[0] + break + + if excludeTransition is None: + raise Exception("Couldn't identify final state of the precedence rule prefix section.") + + else: + + endState = atn.ruleToStopState[idx] + + # all non-excluded transitions that currently target end state need to target blockEnd instead + for state in atn.states: + for transition in state.transitions: + if transition == excludeTransition: + continue + if transition.target == endState: + transition.target = bypassStop + + # all transitions leaving the rule start state need to leave blockStart instead + ruleToStartState = atn.ruleToStartState[idx] + count = len(ruleToStartState.transitions) + while count > 0: + bypassStart.addTransition(ruleToStartState.transitions[count-1]) + del ruleToStartState.transitions[-1] + + # link the new states + atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart)) + bypassStop.addTransition(EpsilonTransition(endState)) + + matchState = BasicState() + atn.addState(matchState) + matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx])) + bypassStart.addTransition(EpsilonTransition(matchState)) + + + def stateIsEndStateFor(self, state:ATNState, idx:int): + if state.ruleIndex != idx: + return None + if not isinstance(state, StarLoopEntryState): + return None + + maybeLoopEndState = state.transitions[len(state.transitions) - 1].target + if not isinstance(maybeLoopEndState, LoopEndState): + return None + + if maybeLoopEndState.epsilonOnlyTransitions and \ + isinstance(maybeLoopEndState.transitions[0].target, RuleStopState): + return state + else: + return None + + + # + # Analyze the {@link StarLoopEntryState} states in the specified ATN to set + # the {@link StarLoopEntryState#isPrecedenceDecision} field to the + # correct value. + # + # @param atn The ATN. 
+ # + def markPrecedenceDecisions(self, atn:ATN): + for state in atn.states: + if not isinstance(state, StarLoopEntryState): + continue + + # We analyze the ATN to determine if this ATN decision state is the + # decision for the closure block that determines whether a + # precedence rule should continue or complete. + # + if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule: + maybeLoopEndState = state.transitions[len(state.transitions) - 1].target + if isinstance(maybeLoopEndState, LoopEndState): + if maybeLoopEndState.epsilonOnlyTransitions and \ + isinstance(maybeLoopEndState.transitions[0].target, RuleStopState): + state.isPrecedenceDecision = True + + def verifyATN(self, atn:ATN): + if not self.deserializationOptions.verifyATN: + return + # verify assumptions + for state in atn.states: + if state is None: + continue + + self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1) + + if isinstance(state, PlusBlockStartState): + self.checkCondition(state.loopBackState is not None) + + if isinstance(state, StarLoopEntryState): + self.checkCondition(state.loopBackState is not None) + self.checkCondition(len(state.transitions) == 2) + + if isinstance(state.transitions[0].target, StarBlockStartState): + self.checkCondition(isinstance(state.transitions[1].target, LoopEndState)) + self.checkCondition(not state.nonGreedy) + elif isinstance(state.transitions[0].target, LoopEndState): + self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState)) + self.checkCondition(state.nonGreedy) + else: + raise Exception("IllegalState") + + if isinstance(state, StarLoopbackState): + self.checkCondition(len(state.transitions) == 1) + self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState)) + + if isinstance(state, LoopEndState): + self.checkCondition(state.loopBackState is not None) + + if isinstance(state, RuleStartState): + self.checkCondition(state.stopState is not None) + + if isinstance(state, 
BlockStartState): + self.checkCondition(state.endState is not None) + + if isinstance(state, BlockEndState): + self.checkCondition(state.startState is not None) + + if isinstance(state, DecisionState): + self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0) + else: + self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState)) + + def checkCondition(self, condition:bool, message=None): + if not condition: + if message is None: + message = "IllegalState" + raise Exception(message) + + def readInt(self): + i = self.data[self.pos] + self.pos += 1 + return i + + def readInt32(self): + low = self.readInt() + high = self.readInt() + return low | (high << 16) + + def readLong(self): + low = self.readInt32() + high = self.readInt32() + return (low & 0x00000000FFFFFFFF) | (high << 32) + + def readUUID(self): + low = self.readLong() + high = self.readLong() + allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64) + return UUID(int=allBits) + + edgeFactories = [ lambda args : None, + lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + RuleTransition(atn.states[arg1], arg2, arg3, target), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + PredicateTransition(target, arg1, arg2, arg3 != 0), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + ActionTransition(target, arg1, arg2, arg3 != 0), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + SetTransition(target, sets[arg1]), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + NotSetTransition(target, sets[arg1]), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : 
\ + WildcardTransition(target), + lambda atn, src, trg, arg1, arg2, arg3, sets, target : \ + PrecedencePredicateTransition(target, arg1) + ] + + def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list): + target = atn.states[trg] + if type > len(self.edgeFactories) or self.edgeFactories[type] is None: + raise Exception("The specified transition type: " + str(type) + " is not valid.") + else: + return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target) + + stateFactories = [ lambda : None, + lambda : BasicState(), + lambda : RuleStartState(), + lambda : BasicBlockStartState(), + lambda : PlusBlockStartState(), + lambda : StarBlockStartState(), + lambda : TokensStartState(), + lambda : RuleStopState(), + lambda : BlockEndState(), + lambda : StarLoopbackState(), + lambda : StarLoopEntryState(), + lambda : PlusLoopbackState(), + lambda : LoopEndState() + ] + + def stateFactory(self, type:int, ruleIndex:int): + if type> len(self.stateFactories) or self.stateFactories[type] is None: + raise Exception("The specified state type " + str(type) + " is not valid.") + else: + s = self.stateFactories[type]() + if s is not None: + s.ruleIndex = ruleIndex + return s + + CHANNEL = 0 #The type of a {@link LexerChannelAction} action. + CUSTOM = 1 #The type of a {@link LexerCustomAction} action. + MODE = 2 #The type of a {@link LexerModeAction} action. + MORE = 3 #The type of a {@link LexerMoreAction} action. + POP_MODE = 4 #The type of a {@link LexerPopModeAction} action. + PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action. + SKIP = 6 #The type of a {@link LexerSkipAction} action. + TYPE = 7 #The type of a {@link LexerTypeAction} action. 
+ + actionFactories = [ lambda data1, data2: LexerChannelAction(data1), + lambda data1, data2: LexerCustomAction(data1, data2), + lambda data1, data2: LexerModeAction(data1), + lambda data1, data2: LexerMoreAction.INSTANCE, + lambda data1, data2: LexerPopModeAction.INSTANCE, + lambda data1, data2: LexerPushModeAction(data1), + lambda data1, data2: LexerSkipAction.INSTANCE, + lambda data1, data2: LexerTypeAction(data1) + ] + + def lexerActionFactory(self, type:int, data1:int, data2:int): + + if type > len(self.actionFactories) or self.actionFactories[type] is None: + raise Exception("The specified lexer action type " + str(type) + " is not valid.") + else: + return self.actionFactories[type](data1, data2) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py new file mode 100644 index 0000000000000000000000000000000000000000..4f6f53f488fa2b731fb39ceecf350b0cb43d1c39 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py @@ -0,0 +1,47 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ +from antlr4.PredictionContext import PredictionContextCache, PredictionContext, getCachedPredictionContext +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.dfa.DFAState import DFAState + + +class ATNSimulator(object): + __slots__ = ('atn', 'sharedContextCache', '__dict__') + + # Must distinguish between missing edge and edge we know leads nowhere#/ + ERROR = DFAState(configs=ATNConfigSet()) + ERROR.stateNumber = 0x7FFFFFFF + + # The context cache maps all PredictionContext objects that are == + # to a single cached copy. 
This cache is shared across all contexts + # in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet + # to use only cached nodes/graphs in addDFAState(). We don't want to + # fill this during closure() since there are lots of contexts that + # pop up but are not used ever again. It also greatly slows down closure(). + # + #This cache makes a huge difference in memory and a little bit in speed. + # For the Java grammar on java.*, it dropped the memory requirements + # at the end from 25M to 16M. We don't store any of the full context + # graphs in the DFA because they are limited to local context only, + # but apparently there's a lot of repetition there as well. We optimize + # the config contexts before storing the config set in the DFA states + # by literally rebuilding them with cached subgraphs only.
+ # + #I tried a cache for use during closure operations, that was + # whacked after each adaptivePredict(). It cost a little bit + # more time I think and doesn't save on the overall footprint + # so it's not worth the complexity.
+ #/ + def __init__(self, atn:ATN, sharedContextCache:PredictionContextCache): + self.atn = atn + self.sharedContextCache = sharedContextCache + + def getCachedContext(self, context:PredictionContext): + if self.sharedContextCache is None: + return context + visited = dict() + return getCachedPredictionContext(context, self.sharedContextCache, visited) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf6a7b9442905c992bb1a88ce343a8a9d63f5b7 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py @@ -0,0 +1,264 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# The following images show the relation of states and +# {@link ATNState#transitions} for various grammar constructs. +# +#We track these variables separately for the DFA and ATN simulation +# because the DFA simulation often has to fail over to the ATN +# simulation. If the ATN simulation fails, we need the DFA to fall +# back to its previously accepted state, if any. If the ATN succeeds, +# then the ATN does the accept and the DFA simulator that invoked it +# can simply return the predicted token type.
+#/ + +from antlr4.PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext +from antlr4.InputStream import InputStream +from antlr4.Token import Token +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import LexerATNConfig +from antlr4.atn.ATNSimulator import ATNSimulator +from antlr4.atn.ATNConfigSet import ATNConfigSet, OrderedATNConfigSet +from antlr4.atn.ATNState import RuleStopState, ATNState +from antlr4.atn.LexerActionExecutor import LexerActionExecutor +from antlr4.atn.Transition import Transition +from antlr4.dfa.DFAState import DFAState +from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException + +class SimState(object): + __slots__ = ('index', 'line', 'column', 'dfaState') + + def __init__(self): + self.reset() + + def reset(self): + self.index = -1 + self.line = 0 + self.column = -1 + self.dfaState = None + +# need forward declaration +Lexer = None +LexerATNSimulator = None + +class LexerATNSimulator(ATNSimulator): + __slots__ = ( + 'decisionToDFA', 'recog', 'startIndex', 'line', 'column', 'mode', + 'DEFAULT_MODE', 'MAX_CHAR_VALUE', 'prevAccept' + ) + + debug = False + dfa_debug = False + + MIN_DFA_EDGE = 0 + MAX_DFA_EDGE = 127 # forces unicode to stay in ATN + + ERROR = None + + def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache): + super().__init__(atn, sharedContextCache) + self.decisionToDFA = decisionToDFA + self.recog = recog + # The current token's starting index into the character stream. + # Shared across DFA to ATN simulation in case the ATN fails and the + # DFA did not have a previous accept state. In this case, we use the + # ATN-generated exception object. 
+ self.startIndex = -1 + # line number 1..n within the input#/ + self.line = 1 + # The index of the character relative to the beginning of the line 0..n-1#/ + self.column = 0 + from antlr4.Lexer import Lexer + self.mode = Lexer.DEFAULT_MODE + # Cache Lexer properties to avoid further imports + self.DEFAULT_MODE = Lexer.DEFAULT_MODE + self.MAX_CHAR_VALUE = Lexer.MAX_CHAR_VALUE + # Used during DFA/ATN exec to record the most recent accept configuration info + self.prevAccept = SimState() + + + def copyState(self, simulator:LexerATNSimulator ): + self.column = simulator.column + self.line = simulator.line + self.mode = simulator.mode + self.startIndex = simulator.startIndex + + def match(self, input:InputStream , mode:int): + self.mode = mode + mark = input.mark() + try: + self.startIndex = input.index + self.prevAccept.reset() + dfa = self.decisionToDFA[mode] + if dfa.s0 is None: + return self.matchATN(input) + else: + return self.execATN(input, dfa.s0) + finally: + input.release(mark) + + def reset(self): + self.prevAccept.reset() + self.startIndex = -1 + self.line = 1 + self.column = 0 + self.mode = self.DEFAULT_MODE + + def matchATN(self, input:InputStream): + startState = self.atn.modeToStartState[self.mode] + + if LexerATNSimulator.debug: + print("matchATN mode " + str(self.mode) + " start: " + str(startState)) + + old_mode = self.mode + s0_closure = self.computeStartState(input, startState) + suppressEdge = s0_closure.hasSemanticContext + s0_closure.hasSemanticContext = False + + next = self.addDFAState(s0_closure) + if not suppressEdge: + self.decisionToDFA[self.mode].s0 = next + + predict = self.execATN(input, next) + + if LexerATNSimulator.debug: + print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString())) + + return predict + + def execATN(self, input:InputStream, ds0:DFAState): + if LexerATNSimulator.debug: + print("start state closure=" + str(ds0.configs)) + + if ds0.isAcceptState: + # allow zero-length tokens + 
self.captureSimState(self.prevAccept, input, ds0) + + t = input.LA(1) + s = ds0 # s is current/from DFA state + + while True: # while more work + if LexerATNSimulator.debug: + print("execATN loop starting closure:", str(s.configs)) + + # As we move src->trg, src->trg, we keep track of the previous trg to + # avoid looking up the DFA state again, which is expensive. + # If the previous target was already part of the DFA, we might + # be able to avoid doing a reach operation upon t. If s!=null, + # it means that semantic predicates didn't prevent us from + # creating a DFA state. Once we know s!=null, we check to see if + # the DFA state has an edge already for t. If so, we can just reuse + # it's configuration set; there's no point in re-computing it. + # This is kind of like doing DFA simulation within the ATN + # simulation because DFA simulation is really just a way to avoid + # computing reach/closure sets. Technically, once we know that + # we have a previously added DFA state, we could jump over to + # the DFA simulator. But, that would mean popping back and forth + # a lot and making things more complicated algorithmically. + # This optimization makes a lot of sense for loops within DFA. + # A character will take us back to an existing DFA state + # that already has lots of edges out of it. e.g., .* in comments. + # print("Target for:" + str(s) + " and:" + str(t)) + target = self.getExistingTargetState(s, t) + # print("Existing:" + str(target)) + if target is None: + target = self.computeTargetState(input, s, t) + # print("Computed:" + str(target)) + + if target == self.ERROR: + break + + # If this is a consumable input element, make sure to consume before + # capturing the accept state so the input index, line, and char + # position accurately reflect the state of the interpreter at the + # end of the token. 
+ if t != Token.EOF: + self.consume(input) + + if target.isAcceptState: + self.captureSimState(self.prevAccept, input, target) + if t == Token.EOF: + break + + t = input.LA(1) + + s = target # flip; current DFA target becomes new src/from state + + return self.failOrAccept(self.prevAccept, input, s.configs, t) + + # Get an existing target state for an edge in the DFA. If the target state + # for the edge has not yet been computed or is otherwise not available, + # this method returns {@code null}. + # + # @param s The current DFA state + # @param t The next input symbol + # @return The existing target DFA state for the given input symbol + # {@code t}, or {@code null} if the target state for this edge is not + # already cached + def getExistingTargetState(self, s:DFAState, t:int): + if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE: + return None + + target = s.edges[t - self.MIN_DFA_EDGE] + if LexerATNSimulator.debug and target is not None: + print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber)) + + return target + + # Compute a target state for an edge in the DFA, and attempt to add the + # computed state and corresponding edge to the DFA. + # + # @param input The input stream + # @param s The current DFA state + # @param t The next input symbol + # + # @return The computed target DFA state for the given input symbol + # {@code t}. If {@code t} does not lead to a valid DFA state, this method + # returns {@link #ERROR}. + def computeTargetState(self, input:InputStream, s:DFAState, t:int): + reach = OrderedATNConfigSet() + + # if we don't find an existing DFA state + # Fill reach starting from closure, following t transitions + self.getReachableConfigSet(input, s.configs, reach, t) + + if len(reach)==0: # we got nowhere on t from s + if not reach.hasSemanticContext: + # we got nowhere on t, don't throw out this knowledge; it'd + # cause a failover from DFA later. + self. 
addDFAEdge(s, t, self.ERROR) + + # stop when we can't match any more char + return self.ERROR + + # Add an edge from s to target DFA found/created for reach + return self.addDFAEdge(s, t, cfgs=reach) + + def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int): + if self.prevAccept.dfaState is not None: + lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor + self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + else: + # if no accept and EOF is first char, return EOF + if t==Token.EOF and input.index==self.startIndex: + return Token.EOF + raise LexerNoViableAltException(self.recog, input, self.startIndex, reach) + + # Given a starting configuration set, figure out all ATN configurations + # we can reach upon input {@code t}. Parameter {@code reach} is a return + # parameter. + def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int): + # this is used to skip processing for configs which have a lower priority + # than a config that already reached an accept state for the same rule + skipAlt = ATN.INVALID_ALT_NUMBER + for cfg in closure: + currentAltReachedAcceptState = ( cfg.alt == skipAlt ) + if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision: + continue + + if LexerATNSimulator.debug: + print("testing", self.getTokenName(t), "at", str(cfg)) + + for trans in cfg.state.transitions: # for each transition + target = self.getReachableTarget(trans, t) + if target is not None: + lexerActionExecutor = cfg.lexerActionExecutor + if lexerActionExecutor is not None: + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex) + + treatEofAsEpsilon = (t == Token.EOF) + config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg) + if self.closure(input, config, reach, currentAltReachedAcceptState, True, 
treatEofAsEpsilon): + # any remaining configs for this alt have a lower priority than + # the one that just reached an accept state. + skipAlt = cfg.alt + + def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int): + if LexerATNSimulator.debug: + print("ACTION", lexerActionExecutor) + + # seek to after last char in token + input.seek(index) + self.line = line + self.column = charPos + + if lexerActionExecutor is not None and self.recog is not None: + lexerActionExecutor.execute(self.recog, input, startIndex) + + def getReachableTarget(self, trans:Transition, t:int): + if trans.matches(t, 0, self.MAX_CHAR_VALUE): + return trans.target + else: + return None + + def computeStartState(self, input:InputStream, p:ATNState): + initialContext = PredictionContext.EMPTY + configs = OrderedATNConfigSet() + for i in range(0,len(p.transitions)): + target = p.transitions[i].target + c = LexerATNConfig(state=target, alt=i+1, context=initialContext) + self.closure(input, c, configs, False, False, False) + return configs + + # Since the alternatives within any lexer decision are ordered by + # preference, this method stops pursuing the closure as soon as an accept + # state is reached. After the first accept state is reached by depth-first + # search from {@code config}, all other (potentially reachable) states for + # this rule would have a lower priority. + # + # @return {@code true} if an accept state is reached, otherwise + # {@code false}. 
+ def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool, + speculative:bool, treatEofAsEpsilon:bool): + if LexerATNSimulator.debug: + print("closure(" + str(config) + ")") + + if isinstance( config.state, RuleStopState ): + if LexerATNSimulator.debug: + if self.recog is not None: + print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config)) + else: + print("closure at rule stop", str(config)) + + if config.context is None or config.context.hasEmptyPath(): + if config.context is None or config.context.isEmpty(): + configs.add(config) + return True + else: + configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY)) + currentAltReachedAcceptState = True + + if config.context is not None and not config.context.isEmpty(): + for i in range(0,len(config.context)): + if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE: + newContext = config.context.getParent(i) # "pop" return state + returnState = self.atn.states[config.context.getReturnState(i)] + c = LexerATNConfig(state=returnState, config=config, context=newContext) + currentAltReachedAcceptState = self.closure(input, c, configs, + currentAltReachedAcceptState, speculative, treatEofAsEpsilon) + + return currentAltReachedAcceptState + + # optimization + if not config.state.epsilonOnlyTransitions: + if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision: + configs.add(config) + + for t in config.state.transitions: + c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon) + if c is not None: + currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon) + + return currentAltReachedAcceptState + + # side-effect: can alter configs.hasSemanticContext + def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, 
configs:ATNConfigSet, + speculative:bool, treatEofAsEpsilon:bool): + c = None + if t.serializationType==Transition.RULE: + newContext = SingletonPredictionContext.create(config.context, t.followState.stateNumber) + c = LexerATNConfig(state=t.target, config=config, context=newContext) + + elif t.serializationType==Transition.PRECEDENCE: + raise UnsupportedOperationException("Precedence predicates are not supported in lexers.") + + elif t.serializationType==Transition.PREDICATE: + # Track traversing semantic predicates. If we traverse, + # we cannot add a DFA state for this "reach" computation + # because the DFA would not test the predicate again in the + # future. Rather than creating collections of semantic predicates + # like v3 and testing them on prediction, v4 will test them on the + # fly all the time using the ATN not the DFA. This is slower but + # semantically it's not used that often. One of the key elements to + # this predicate mechanism is not adding DFA states that see + # predicates immediately afterwards in the ATN. For example, + + # a : ID {p1}? | ID {p2}? ; + + # should create the start state for rule 'a' (to save start state + # competition), but should not create target of ID state. The + # collection of ATN states the following ID references includes + # states reached by traversing predicates. Since this is when we + # test them, we cannot cash the DFA state target of ID. + + if LexerATNSimulator.debug: + print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex)) + configs.hasSemanticContext = True + if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative): + c = LexerATNConfig(state=t.target, config=config) + + elif t.serializationType==Transition.ACTION: + if config.context is None or config.context.hasEmptyPath(): + # execute actions anywhere in the start rule for a token. + # + # TODO: if the entry rule is invoked recursively, some + # actions may be executed during the recursive call. 
The + # problem can appear when hasEmptyPath() is true but + # isEmpty() is false. In this case, the config needs to be + # split into two contexts - one with just the empty path + # and another with everything but the empty path. + # Unfortunately, the current algorithm does not allow + # getEpsilonTarget to return two configurations, so + # additional modifications are needed before we can support + # the split operation. + lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor, + self.atn.lexerActions[t.actionIndex]) + c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor) + + else: + # ignore actions in referenced rules + c = LexerATNConfig(state=t.target, config=config) + + elif t.serializationType==Transition.EPSILON: + c = LexerATNConfig(state=t.target, config=config) + + elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]: + if treatEofAsEpsilon: + if t.matches(Token.EOF, 0, self.MAX_CHAR_VALUE): + c = LexerATNConfig(state=t.target, config=config) + + return c + + # Evaluate a predicate specified in the lexer. + # + #If {@code speculative} is {@code true}, this method was called before + # {@link #consume} for the matched character. This method should call + # {@link #consume} before evaluating the predicate to ensure position + # sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine}, + # and {@link Lexer#getcolumn}, properly reflect the current + # lexer state. This method should restore {@code input} and the simulator + # to the original state before returning (i.e. undo the actions made by the + # call to {@link #consume}.
# @param input The input stream.
# @param ruleIndex The rule containing the predicate.
# @param predIndex The index of the predicate within the rule.
# @param speculative {@code true} if the current index in {@code input} is
#     one character before the predicate's location.
#
# @return {@code true} if the specified predicate evaluates to
#     {@code true}.
#/
# NOTE(review): the defs below are methods of LexerATNSimulator; the class
# header lies outside this chunk of the file.
def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool):
    # assume true if no recognizer was provided
    if self.recog is None:
        return True

    if not speculative:
        # Non-speculative: the matched character was already consumed, so the
        # lexer state is correct for position-sensitive predicates.
        return self.recog.sempred(None, ruleIndex, predIndex)

    # Speculative: consume the lookahead character so the predicate sees the
    # post-match line/column, then restore both the simulator and the input
    # stream to their original state before returning.
    savedcolumn = self.column
    savedLine = self.line
    index = input.index
    marker = input.mark()
    try:
        self.consume(input)
        return self.recog.sempred(None, ruleIndex, predIndex)
    finally:
        self.column = savedcolumn
        self.line = savedLine
        input.seek(index)
        input.release(marker)

# Snapshot the current simulator position (input index, line, column) and the
# reached DFA state into `settings` so a later accept can rewind to this point.
def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState):
    settings.index = input.index
    settings.line = self.line
    settings.column = self.column
    settings.dfaState = dfaState

# Add an edge on input symbol `tk` from `from_` to the DFA state for `cfgs`
# (or to the explicitly supplied `to` state). Returns the target DFA state.
def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState:

    if to is None and cfgs is not None:
        # leading to this call, ATNConfigSet.hasSemanticContext is used as a
        # marker indicating dynamic predicate evaluation makes this edge
        # dependent on the specific input sequence, so the static edge in the
        # DFA should be omitted. The target DFAState is still created since
        # execATN has the ability to resynchronize with the DFA state cache
        # following the predicate evaluation step.
        #
        # TJP notes: next time through the DFA, we see a pred again and eval.
        # If that gets us to a previously created (but dangling) DFA
        # state, we can continue in pure DFA mode from there.
        #/
        suppressEdge = cfgs.hasSemanticContext
        cfgs.hasSemanticContext = False

        to = self.addDFAState(cfgs)

        if suppressEdge:
            return to

    # add the edge
    if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE:
        # Only track edges within the DFA bounds
        return to

    if LexerATNSimulator.debug:
        print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))

    if from_.edges is None:
        # make room for tokens 1..n and -1 masquerading as index 0
        from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1)

    from_.edges[tk - self.MIN_DFA_EDGE] = to # connect

    return to


# Add a new DFA state if there isn't one with this set of
# configurations already. This method also detects the first
# configuration containing an ATN rule stop state. Later, when
# traversing the DFA, we will know which rule to accept.
def addDFAState(self, configs:ATNConfigSet) -> DFAState:

    proposed = DFAState(configs=configs)
    # A config that reached a rule stop state makes this an accept state.
    firstConfigWithRuleStopState = next((cfg for cfg in configs if isinstance(cfg.state, RuleStopState)), None)

    if firstConfigWithRuleStopState is not None:
        proposed.isAcceptState = True
        proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
        proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]

    dfa = self.decisionToDFA[self.mode]
    existing = dfa.states.get(proposed, None)
    if existing is not None:
        # an equivalent state is already cached; reuse it
        return existing

    newState = proposed

    newState.stateNumber = len(dfa.states)
    configs.setReadonly(True)
    newState.configs = configs
    dfa.states[newState] = newState
    return newState

# Return the DFA used for the given lexer mode.
def getDFA(self, mode:int):
    return self.decisionToDFA[mode]

# Get the text matched so far for the current token.
def getText(self, input:InputStream):
    # index is first lookahead char, don't include.
    return input.getText(self.startIndex, input.index-1)

# Consume one character from the input, tracking line/column so that
# position-sensitive predicates and error messages stay accurate.
def consume(self, input:InputStream):
    curChar = input.LA(1)
    if curChar==ord('\n'):
        self.line += 1
        self.column = 0
    else:
        self.column += 1
    input.consume()

# Printable name for a token type: "EOF" or the quoted character.
def getTokenName(self, t:int):
    if t==-1:
        return "EOF"
    else:
        return "'" + chr(t) + "'"


# Shared sentinel DFA state meaning "no viable alternative".
LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())

del Lexer

# diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..0fa7a895f31cea4415b71def30ca1ec90167f1b3
# --- /dev/null
# +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py
# @@ -0,0 +1,298 @@

#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#

from enum import IntEnum

# need forward declaration
Lexer = None


# Discriminator for the concrete LexerAction subclasses; the integer values
# match the ATN serialization format.
class LexerActionType(IntEnum):

    CHANNEL = 0     #The type of a {@link LexerChannelAction} action.
    CUSTOM = 1      #The type of a {@link LexerCustomAction} action.
    MODE = 2        #The type of a {@link LexerModeAction} action.
    MORE = 3        #The type of a {@link LexerMoreAction} action.
    POP_MODE = 4    #The type of a {@link LexerPopModeAction} action.
    PUSH_MODE = 5   #The type of a {@link LexerPushModeAction} action.
    SKIP = 6        #The type of a {@link LexerSkipAction} action.
    TYPE = 7        #The type of a {@link LexerTypeAction} action.
class LexerAction(object):
    # Abstract base for every lexer action collected during ATN simulation.
    #
    # actionType          -- the LexerActionType discriminator for the subclass
    # isPositionDependent -- True when the action must run at a specific input
    #                        offset (stateful subclasses opt in)
    __slots__ = ('actionType', 'isPositionDependent')

    def __init__(self, action:LexerActionType):
        self.actionType = action
        self.isPositionDependent = False

    # Hash only on the discriminator; subclasses with state override this.
    def __hash__(self):
        return hash(self.actionType)

    # Base actions compare by identity; subclasses with state override this.
    def __eq__(self, other):
        return self is other


#
# Implements the {@code skip} lexer action by calling {@link Lexer#skip}.
#
# The {@code skip} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.
class LexerSkipAction(LexerAction):
    # Skips the current token by delegating to Lexer.skip(). The action is
    # stateless, so one shared instance is exposed via INSTANCE.

    INSTANCE = None

    def __init__(self):
        super().__init__(LexerActionType.SKIP)

    def execute(self, lexer:Lexer):
        lexer.skip()

    def __str__(self):
        return "skip"

LexerSkipAction.INSTANCE = LexerSkipAction()

# Implements the {@code type} lexer action by calling {@link Lexer#setType}
# with the assigned type.
class LexerTypeAction(LexerAction):
    __slots__ = 'type'

    def __init__(self, type:int):
        super().__init__(LexerActionType.TYPE)
        self.type = type

    # Assign the configured token type to the token being built.
    def execute(self, lexer:Lexer):
        lexer.type = self.type

    def __hash__(self):
        return hash((self.actionType, self.type))

    def __eq__(self, other):
        return self is other \
            or (isinstance(other, LexerTypeAction) and self.type == other.type)

    def __str__(self):
        return f"type({self.type})"


# Implements the {@code pushMode} lexer action by calling
# {@link Lexer#pushMode} with the assigned mode.
class LexerPushModeAction(LexerAction):
    __slots__ = 'mode'

    def __init__(self, mode:int):
        super().__init__(LexerActionType.PUSH_MODE)
        self.mode = mode

    # Push the configured mode onto the lexer's mode stack.
    def execute(self, lexer:Lexer):
        lexer.pushMode(self.mode)

    def __hash__(self):
        return hash((self.actionType, self.mode))

    def __eq__(self, other):
        return self is other \
            or (isinstance(other, LexerPushModeAction) and self.mode == other.mode)

    def __str__(self):
        return f"pushMode({self.mode})"


# Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}.
#
# The {@code popMode} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.
class LexerPopModeAction(LexerAction):

    INSTANCE = None

    def __init__(self):
        super().__init__(LexerActionType.POP_MODE)

    # Pop the top mode off the lexer's mode stack.
    def execute(self, lexer:Lexer):
        lexer.popMode()

    def __str__(self):
        return "popMode"

LexerPopModeAction.INSTANCE = LexerPopModeAction()

# Implements the {@code more} lexer action by calling {@link Lexer#more}.
#
# The {@code more} command does not have any parameters, so this action is
# implemented as a singleton instance exposed by {@link #INSTANCE}.
class LexerMoreAction(LexerAction):

    INSTANCE = None

    def __init__(self):
        super().__init__(LexerActionType.MORE)

    # Implemented by calling Lexer.more(). (An earlier comment here claimed
    # Lexer.popMode(), which did not match the code.)
    def execute(self, lexer:Lexer):
        lexer.more()

    def __str__(self):
        return "more"

LexerMoreAction.INSTANCE = LexerMoreAction()

# Implements the {@code mode} lexer action by calling {@link Lexer#mode} with
# the assigned mode.
class LexerModeAction(LexerAction):
    __slots__ = 'mode'

    def __init__(self, mode:int):
        super().__init__(LexerActionType.MODE)
        self.mode = mode

    # Switch the lexer to the configured mode.
    def execute(self, lexer:Lexer):
        lexer.mode(self.mode)

    def __hash__(self):
        return hash((self.actionType, self.mode))

    def __eq__(self, other):
        return self is other \
            or (isinstance(other, LexerModeAction) and self.mode == other.mode)

    def __str__(self):
        return f"mode({self.mode})"

# Executes a custom lexer action by calling {@link Recognizer#action} with the
# rule and action indexes assigned to the custom action. The implementation of
# a custom action is added to the generated code for the lexer in an override
# of {@link Recognizer#action} when the grammar is compiled.
#
# This class may represent embedded actions created with the {...}
# syntax in ANTLR 4, as well as actions created for lexer commands where the
# command argument could not be evaluated when the grammar was compiled.
class LexerCustomAction(LexerAction):
    # FIX(review): the class declaration, __slots__ and constructor were
    # missing/garbled here — execute/__hash__/__eq__ below referenced
    # self.ruleIndex and self.actionIndex and compared against
    # LexerCustomAction with no class in scope. Reconstructed so the methods
    # are attached to a proper class again.
    __slots__ = ('ruleIndex', 'actionIndex')

    # @param ruleIndex The rule index to pass to {@link Recognizer#action}.
    # @param actionIndex The index of the action within that rule.
    def __init__(self, ruleIndex:int, actionIndex:int):
        super().__init__(LexerActionType.CUSTOM)
        self.ruleIndex = ruleIndex
        self.actionIndex = actionIndex
        # Custom actions may inspect the lexer position, so they must run at
        # the exact offset where they appeared in the rule.
        self.isPositionDependent = True

    # Custom actions are implemented by calling {@link Lexer#action} with the
    # appropriate rule and action indexes.
    def execute(self, lexer:Lexer):
        lexer.action(None, self.ruleIndex, self.actionIndex)

    def __hash__(self):
        return hash((self.actionType, self.ruleIndex, self.actionIndex))

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerCustomAction):
            return False
        else:
            return self.ruleIndex == other.ruleIndex and self.actionIndex == other.actionIndex

# Implements the {@code channel} lexer action by calling
# {@link Lexer#setChannel} with the assigned channel.
class LexerChannelAction(LexerAction):
    __slots__ = 'channel'

    # Constructs a new {@code channel} action with the specified channel value.
    # @param channel The channel value to pass to {@link Lexer#setChannel}.
    def __init__(self, channel:int):
        super().__init__(LexerActionType.CHANNEL)
        self.channel = channel

    # Assign the configured channel to the token being built. (The Python
    # runtime writes lexer._channel directly rather than calling a setter.)
    def execute(self, lexer:Lexer):
        lexer._channel = self.channel

    def __hash__(self):
        return hash((self.actionType, self.channel))

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerChannelAction):
            return False
        else:
            return self.channel == other.channel

    def __str__(self):
        return "channel(" + str(self.channel) + ")"

# This implementation of {@link LexerAction} is used for tracking input offsets
# for position-dependent actions within a {@link LexerActionExecutor}.
#
# This action is not serialized as part of the ATN, and is only required for
# position-dependent lexer actions which appear at a location other than the
# end of a rule. For more information about DFA optimizations employed for
# lexer actions, see {@link LexerActionExecutor#append} and
# {@link LexerActionExecutor#fixOffsetBeforeMatch}.
class LexerIndexedCustomAction(LexerAction):
    # Pairs a character offset (relative to the token start index) with a
    # wrapped LexerAction so that position-dependent actions occurring before
    # the end of a rule can be replayed at the right input position.
    #
    # Note: this wrapper is only required for actions whose
    # isPositionDependent flag is True.
    __slots__ = ('offset', 'action')

    # @param offset The offset into the input {@link CharStream}, relative to
    # the token start index, at which the specified lexer action should be
    # executed.
    # @param action The lexer action to execute at a particular offset in the
    # input {@link CharStream}.
    def __init__(self, offset:int, action:LexerAction):
        # Delegate the action type so this wrapper is indistinguishable from
        # the wrapped action where only the type is inspected.
        super().__init__(action.actionType)
        self.offset = offset
        self.action = action
        self.isPositionDependent = True

    # The caller (LexerActionExecutor.execute) is expected to have already
    # positioned the input stream; simply forward to the wrapped action.
    def execute(self, lexer:Lexer):
        self.action.execute(lexer)

    def __hash__(self):
        return hash((self.actionType, self.offset, self.action))

    def __eq__(self, other):
        return self is other \
            or (isinstance(other, LexerIndexedCustomAction)
                and self.offset == other.offset
                and self.action == other.action)

# diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..5c6462c3a28f4ccbcb0a65dcde96f497dd392416
# --- /dev/null
# +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py
# @@ -0,0 +1,143 @@

#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/

# Represents an executor for a sequence of lexer actions which traversed during
# the matching operation of a lexer rule (token).
#
# The executor tracks position information for position-dependent lexer actions
# efficiently, ensuring that actions appearing only at the end of the rule do
# not cause bloating of the {@link DFA} created for the lexer.
from antlr4.InputStream import InputStream
from antlr4.atn.LexerAction import LexerAction, LexerIndexedCustomAction

# need a forward declaration
Lexer = None
LexerActionExecutor = None

class LexerActionExecutor(object):
    __slots__ = ('lexerActions', 'hashCode')

    def __init__(self, lexerActions:list=None):
        # FIX(review): the previous signature used the mutable default
        # ``lexerActions:list=list()`` — that list is created once at def time
        # and shared by every no-argument construction. A None sentinel keeps
        # the interface backward compatible (callers passing a list are
        # unaffected) while giving each instance its own fresh list.
        self.lexerActions = [] if lexerActions is None else lexerActions
        # Caches the result of {@link #hashCode} since the hash code is an
        # element of the performance-critical {@link LexerATNConfig#hashCode}
        # operation.
        self.hashCode = hash("".join([str(la) for la in self.lexerActions]))


    # Creates a {@link LexerActionExecutor} which executes the actions for
    # the input {@code lexerActionExecutor} followed by a specified
    # {@code lexerAction}.
    #
    # @param lexerActionExecutor The executor for actions already traversed by
    # the lexer while matching a token within a particular
    # {@link LexerATNConfig}. If this is {@code null}, the method behaves as
    # though it were an empty executor.
    # @param lexerAction The lexer action to execute after the actions
    # specified in {@code lexerActionExecutor}.
    #
    # @return A {@link LexerActionExecutor} for executing the combined actions
    # of {@code lexerActionExecutor} and {@code lexerAction}.
    @staticmethod
    def append(lexerActionExecutor:LexerActionExecutor , lexerAction:LexerAction ):
        if lexerActionExecutor is None:
            return LexerActionExecutor([ lexerAction ])

        lexerActions = lexerActionExecutor.lexerActions + [ lexerAction ]
        return LexerActionExecutor(lexerActions)

    # Creates a {@link LexerActionExecutor} which encodes the current offset
    # for position-dependent lexer actions.
    #
    # Prior to traversing a match transition in the ATN, the current offset
    # from the token start index is assigned to all position-dependent lexer
    # actions which have not already been assigned a fixed offset (by wrapping
    # them in {@link LexerIndexedCustomAction}). Storing offsets relative to
    # the token start index keeps the DFA representation of mid-token actions
    # shareable among tokens of the same length regardless of their absolute
    # position in the input stream.
    #
    # If the current executor already has offsets assigned to all
    # position-dependent lexer actions, the method returns {@code this}.
    #
    # @param offset The current offset to assign to all position-dependent
    # lexer actions which do not already have offsets assigned.
    #
    # @return A {@link LexerActionExecutor} which stores input stream offsets
    # for all position-dependent lexer actions.
    #/
    def fixOffsetBeforeMatch(self, offset:int):
        updatedLexerActions = None
        for i in range(0, len(self.lexerActions)):
            if self.lexerActions[i].isPositionDependent and not isinstance(self.lexerActions[i], LexerIndexedCustomAction):
                if updatedLexerActions is None:
                    # copy-on-write: only clone the list once a change is needed
                    updatedLexerActions = [ la for la in self.lexerActions ]
                updatedLexerActions[i] = LexerIndexedCustomAction(offset, self.lexerActions[i])

        if updatedLexerActions is None:
            return self
        else:
            return LexerActionExecutor(updatedLexerActions)


    # Execute the actions encapsulated by this executor within the context of a
    # particular {@link Lexer}.
    #
    # This method calls {@link IntStream#seek} to set the position of the
    # {@code input} {@link CharStream} prior to calling
    # {@link LexerAction#execute} on a position-dependent action. Before the
    # method returns, the input position will be restored to the same position
    # it was in when the method was invoked.
    #
    # @param lexer The lexer instance.
    # @param input The input stream which is the source for the current token.
    # When this method is called, the current {@link IntStream#index} for
    # {@code input} should be the start of the following token, i.e. 1
    # character past the end of the current token.
    # @param startIndex The token start index. This value may be passed to
    # {@link IntStream#seek} to set the {@code input} position to the beginning
    # of the token.
    #/
    def execute(self, lexer:Lexer, input:InputStream, startIndex:int):
        requiresSeek = False
        stopIndex = input.index
        try:
            for lexerAction in self.lexerActions:
                if isinstance(lexerAction, LexerIndexedCustomAction):
                    offset = lexerAction.offset
                    input.seek(startIndex + offset)
                    lexerAction = lexerAction.action
                    requiresSeek = (startIndex + offset) != stopIndex
                elif lexerAction.isPositionDependent:
                    input.seek(stopIndex)
                    requiresSeek = False
                lexerAction.execute(lexer)
        finally:
            # only restore the stream if a seek actually moved it
            if requiresSeek:
                input.seek(stopIndex)

    def __hash__(self):
        return self.hashCode

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerActionExecutor):
            return False
        else:
            return self.hashCode == other.hashCode \
                and self.lexerActions == other.lexerActions

del Lexer

# diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..d1fb3d7ed8bab47fb851d85da88d20dc39715627
# --- /dev/null
# +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py
# @@ -0,0 +1,1649 @@

#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
+# + +# +# The embodiment of the adaptive LL(*), ALL(*), parsing strategy. +# +#+# The basic complexity of the adaptive strategy makes it harder to understand. +# We begin with ATN simulation to build paths in a DFA. Subsequent prediction +# requests go through the DFA first. If they reach a state without an edge for +# the current symbol, the algorithm fails over to the ATN simulation to +# complete the DFA path for the current input (until it finds a conflict state +# or uniquely predicting state).
+# +#+# All of that is done without using the outer context because we want to create +# a DFA that is not dependent upon the rule invocation stack when we do a +# prediction. One DFA works in all contexts. We avoid using context not +# necessarily because it's slower, although it can be, but because of the DFA +# caching problem. The closure routine only considers the rule invocation stack +# created during prediction beginning in the decision rule. For example, if +# prediction occurs without invoking another rule's ATN, there are no context +# stacks in the configurations. When lack of context leads to a conflict, we +# don't know if it's an ambiguity or a weakness in the strong LL(*) parsing +# strategy (versus full LL(*)).
+# +#+# When SLL yields a configuration set with conflict, we rewind the input and +# retry the ATN simulation, this time using full outer context without adding +# to the DFA. Configuration context stacks will be the full invocation stacks +# from the start rule. If we get a conflict using full context, then we can +# definitively say we have a true ambiguity for that input sequence. If we +# don't get a conflict, it implies that the decision is sensitive to the outer +# context. (It is not context-sensitive in the sense of context-sensitive +# grammars.)
+# +#+# The next time we reach this DFA state with an SLL conflict, through DFA +# simulation, we will again retry the ATN simulation using full context mode. +# This is slow because we can't save the results and have to "interpret" the +# ATN each time we get that input.
+# +#+# CACHING FULL CONTEXT PREDICTIONS
+# +#+# We could cache results from full context to predicted alternative easily and +# that saves a lot of time but doesn't work in presence of predicates. The set +# of visible predicates from the ATN start state changes depending on the +# context, because closure can fall off the end of a rule. I tried to cache +# tuples (stack context, semantic context, predicted alt) but it was slower +# than interpreting and much more complicated. Also required a huge amount of +# memory. The goal is not to create the world's fastest parser anyway. I'd like +# to keep this algorithm simple. By launching multiple threads, we can improve +# the speed of parsing across a large number of files.
+# +#+# There is no strict ordering between the amount of input used by SLL vs LL, +# which makes it really hard to build a cache for full context. Let's say that +# we have input A B C that leads to an SLL conflict with full context X. That +# implies that using X we might only use A B but we could also use A B C D to +# resolve conflict. Input A B C D could predict alternative 1 in one position +# in the input and A B C E could predict alternative 2 in another position in +# input. The conflicting SLL configurations could still be non-unique in the +# full context prediction, which would lead us to requiring more input than the +# original A B C. To make a prediction cache work, we have to track the exact +# input used during the previous prediction. That amounts to a cache that maps +# X to a specific DFA for that context.
+# +#+# Something should be done for left-recursive expression predictions. They are +# likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry +# with full LL thing Sam does.
+# +#+# AVOIDING FULL CONTEXT PREDICTION
+# +#+# We avoid doing full context retry when the outer context is empty, we did not +# dip into the outer context by falling off the end of the decision state rule, +# or when we force SLL mode.
+# +#+# As an example of the not dip into outer context case, consider as super +# constructor calls versus function calls. One grammar might look like +# this:
+# +#
+# ctorBody
+# : '{' superCall? stat* '}'
+# ;
+#
+#
+# +# Or, you might see something like
+# +#+# stat +# : superCall ';' +# | expression ';' +# | ... +# ; +#+# +#
+# In both cases I believe that no closure operations will dip into the outer +# context. In the first case ctorBody in the worst case will stop at the '}'. +# In the 2nd case it should stop at the ';'. Both cases should stay within the +# entry rule and not dip into the outer context.
+# +#+# PREDICATES
+# +#+# Predicates are always evaluated if present in either SLL or LL both. SLL and +# LL simulation deals with predicates differently. SLL collects predicates as +# it performs closure operations like ANTLR v3 did. It delays predicate +# evaluation until it reaches and accept state. This allows us to cache the SLL +# ATN simulation whereas, if we had evaluated predicates on-the-fly during +# closure, the DFA state configuration sets would be different and we couldn't +# build up a suitable DFA.
+# +#+# When building a DFA accept state during ATN simulation, we evaluate any +# predicates and return the sole semantically valid alternative. If there is +# more than 1 alternative, we report an ambiguity. If there are 0 alternatives, +# we throw an exception. Alternatives without predicates act like they have +# true predicates. The simple way to think about it is to strip away all +# alternatives with false predicates and choose the minimum alternative that +# remains.
+# +#+# When we start in the DFA and reach an accept state that's predicated, we test +# those and return the minimum semantically viable alternative. If no +# alternatives are viable, we throw an exception.
+# +#+# During full LL ATN simulation, closure always evaluates predicates and +# on-the-fly. This is crucial to reducing the configuration set size during +# closure. It hits a landmine when parsing with the Java grammar, for example, +# without this on-the-fly evaluation.
+# +#+# SHARING DFA
+# +#+# All instances of the same parser share the same decision DFAs through a +# static field. Each instance gets its own ATN simulator but they share the +# same {@link #decisionToDFA} field. They also share a +# {@link PredictionContextCache} object that makes sure that all +# {@link PredictionContext} objects are shared among the DFA states. This makes +# a big size difference.
+# +#+# THREAD SAFETY
+# +#+# The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when +# it adds a new DFA object to that array. {@link #addDFAEdge} +# locks on the DFA for the current decision when setting the +# {@link DFAState#edges} field. {@link #addDFAState} locks on +# the DFA for the current decision when looking up a DFA state to see if it +# already exists. We must make sure that all requests to add DFA states that +# are equivalent result in the same shared DFA object. This is because lots of +# threads will be trying to update the DFA at once. The +# {@link #addDFAState} method also locks inside the DFA lock +# but this time on the shared context cache when it rebuilds the +# configurations' {@link PredictionContext} objects using cached +# subgraphs/nodes. No other locking occurs, even during DFA simulation. This is +# safe as long as we can guarantee that all threads referencing +# {@code s.edge[t]} get the same physical target {@link DFAState}, or +# {@code null}. Once into the DFA, the DFA simulation does not reference the +# {@link DFA#states} map. It follows the {@link DFAState#edges} field to new +# targets. The DFA simulator will either find {@link DFAState#edges} to be +# {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or +# {@code dfa.edges[t]} to be non-null. The +# {@link #addDFAEdge} method could be racing to set the field +# but in either case the DFA simulator works; if {@code null}, and requests ATN +# simulation. It could also race trying to get {@code dfa.edges[t]}, but either +# way it will work because it's not doing a test and set operation.
+# +#+# Starting with SLL then failing to combined SLL/LL (Two-Stage +# Parsing)
+# +#+# Sam pointed out that if SLL does not give a syntax error, then there is no +# point in doing full LL, which is slower. We only have to try LL if we get a +# syntax error. For maximum speed, Sam starts the parser set to pure SLL +# mode with the {@link BailErrorStrategy}:
+# +#
+# parser.{@link Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
+# parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
+#
+#
+# +# If it does not get a syntax error, then we're done. If it does get a syntax +# error, we need to retry with the combined SLL/LL strategy.
+# +#+# The reason this works is as follows. If there are no SLL conflicts, then the +# grammar is SLL (at least for that input set). If there is an SLL conflict, +# the full LL analysis must yield a set of viable alternatives which is a +# subset of the alternatives reported by SLL. If the LL set is a singleton, +# then the grammar is LL but not SLL. If the LL set is the same size as the SLL +# set, the decision is SLL. If the LL set has size > 1, then that decision +# is truly ambiguous on the current input. If the LL set is smaller, then the +# SLL conflict resolution might choose an alternative that the full LL would +# rule out as a possibility based upon better context information. If that's +# the case, then the SLL parse will definitely get an error because the full LL +# analysis says it's not viable. If SLL conflict resolution chooses an +# alternative within the LL set, them both SLL and LL would choose the same +# alternative because they both choose the minimum of multiple conflicting +# alternatives.
+# +#+# Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and +# a smaller LL set called s. If s is {@code {2, 3}}, then SLL +# parsing will get an error because SLL will pursue alternative 1. If +# s is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will +# choose the same alternative because alternative one is the minimum of either +# set. If s is {@code {2}} or {@code {3}} then SLL will get a syntax +# error. If s is {@code {1}} then SLL will succeed.
+# +#+# Of course, if the input is invalid, then we will get an error for sure in +# both SLL and LL parsing. Erroneous input will therefore require 2 passes over +# the input.
+# +import sys +from antlr4 import DFA +from antlr4.PredictionContext import PredictionContextCache, PredictionContext, SingletonPredictionContext, \ + PredictionContextFromRuleContext +from antlr4.BufferedTokenStream import TokenStream +from antlr4.Parser import Parser +from antlr4.ParserRuleContext import ParserRuleContext +from antlr4.RuleContext import RuleContext +from antlr4.Token import Token +from antlr4.Utils import str_list +from antlr4.atn.ATN import ATN +from antlr4.atn.ATNConfig import ATNConfig +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.atn.ATNSimulator import ATNSimulator +from antlr4.atn.ATNState import StarLoopEntryState, DecisionState, RuleStopState, ATNState +from antlr4.atn.PredictionMode import PredictionMode +from antlr4.atn.SemanticContext import SemanticContext, AND, andContext, orContext +from antlr4.atn.Transition import Transition, RuleTransition, ActionTransition, PrecedencePredicateTransition, \ + PredicateTransition, AtomTransition, SetTransition, NotSetTransition +from antlr4.dfa.DFAState import DFAState, PredPrediction +from antlr4.error.Errors import NoViableAltException + + +class ParserATNSimulator(ATNSimulator): + __slots__ = ( + 'parser', 'decisionToDFA', 'predictionMode', '_input', '_startIndex', + '_outerContext', '_dfa', 'mergeCache' + ) + + debug = False + debug_list_atn_decisions = False + dfa_debug = False + retry_debug = False + + + def __init__(self, parser:Parser, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache): + super().__init__(atn, sharedContextCache) + self.parser = parser + self.decisionToDFA = decisionToDFA + # SLL, LL, or LL + exact ambig detection?# + self.predictionMode = PredictionMode.LL + # LAME globals to avoid parameters!!!!! I need these down deep in predTransition + self._input = None + self._startIndex = 0 + self._outerContext = None + self._dfa = None + # Each prediction operation uses a cache for merge of prediction contexts. 
+ # Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + # isn't synchronized but we're ok since two threads shouldn't reuse same + # parser/atnsim object because it can only handle one input at a time. + # This maps graphs a and b to merged result c. (a,b)→c. We can avoid + # the merge if we ever see a and b again. Note that (b,a)→c should + # also be examined during cache lookup. + # + self.mergeCache = None + + + def reset(self): + pass + + def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext): + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("adaptivePredict decision " + str(decision) + + " exec LA(1)==" + self.getLookaheadName(input) + + " line " + str(input.LT(1).line) + ":" + + str(input.LT(1).column)) + self._input = input + self._startIndex = input.index + self._outerContext = outerContext + + dfa = self.decisionToDFA[decision] + self._dfa = dfa + m = input.mark() + index = input.index + + # Now we are certain to have a specific decision's DFA + # But, do we still need an initial state? + try: + if dfa.precedenceDfa: + # the start state for a precedence DFA depends on the current + # parser precedence, and is provided by a DFA method. 
+ s0 = dfa.getPrecedenceStartState(self.parser.getPrecedence()) + else: + # the start state for a "regular" DFA is just s0 + s0 = dfa.s0 + + if s0 is None: + if outerContext is None: + outerContext = ParserRuleContext.EMPTY + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("predictATN decision " + str(dfa.decision) + + " exec LA(1)==" + self.getLookaheadName(input) + + ", outerContext=" + outerContext.toString(self.parser.literalNames, None)) + + fullCtx = False + s0_closure = self.computeStartState(dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx) + + if dfa.precedenceDfa: + # If this is a precedence DFA, we use applyPrecedenceFilter + # to convert the computed start state to a precedence start + # state. We then use DFA.setPrecedenceStartState to set the + # appropriate start state for the precedence level rather + # than simply setting DFA.s0. + # + dfa.s0.configs = s0_closure # not used for prediction but useful to know start configs anyway + s0_closure = self.applyPrecedenceFilter(s0_closure) + s0 = self.addDFAState(dfa, DFAState(configs=s0_closure)) + dfa.setPrecedenceStartState(self.parser.getPrecedence(), s0) + else: + s0 = self.addDFAState(dfa, DFAState(configs=s0_closure)) + dfa.s0 = s0 + + alt = self.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulator.debug: + print("DFA after predictATN: " + dfa.toString(self.parser.literalNames)) + return alt + finally: + self._dfa = None + self.mergeCache = None # wack cache after each prediction + input.seek(index) + input.release(m) + + # Performs ATN simulation to compute a predicted alternative based + # upon the remaining input, but also updates the DFA cache to avoid + # having to traverse the ATN again for the same input sequence. 
+ + # There are some key conditions we're looking for after computing a new + # set of ATN configs (proposed DFA state): + # if the set is empty, there is no viable alternative for current symbol + # does the state uniquely predict an alternative? + # does the state have a conflict that would prevent us from + # putting it on the work list? + + # We also have some key operations to do: + # add an edge from previous DFA state to potentially new DFA state, D, + # upon current symbol but only if adding to work list, which means in all + # cases except no viable alternative (and possibly non-greedy decisions?) + # collecting predicates and adding semantic context to DFA accept states + # adding rule context to context-sensitive DFA accept states + # consuming an input symbol + # reporting a conflict + # reporting an ambiguity + # reporting a context sensitivity + # reporting insufficient predicates + + # cover these cases: + # dead end + # single alt + # single alt + preds + # conflict + # conflict + preds + # + def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ): + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("execATN decision " + str(dfa.decision) + + " exec LA(1)==" + self.getLookaheadName(input) + + " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column)) + + previousD = s0 + + if ParserATNSimulator.debug: + print("s0 = " + str(s0)) + + t = input.LA(1) + + while True: # while more work + D = self.getExistingTargetState(previousD, t) + if D is None: + D = self.computeTargetState(dfa, previousD, t) + if D is self.ERROR: + # if any configs in previous dipped into outer context, that + # means that input up to t actually finished entry rule + # at least for SLL decision. Full LL doesn't dip into outer + # so don't need special case. + # We will get an error no matter what so delay until after + # decision; better error message. 
Also, no reachable target + # ATN states in SLL implies LL will also get nowhere. + # If conflict in states that dip out, choose min since we + # will get error no matter what. + e = self.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.seek(startIndex) + alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt!=ATN.INVALID_ALT_NUMBER: + return alt + raise e + + if D.requiresFullContext and self.predictionMode != PredictionMode.SLL: + # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts = D.configs.conflictingAlts + if D.predicates is not None: + if ParserATNSimulator.debug: + print("DFA state has preds in DFA sim LL failover") + conflictIndex = input.index + if conflictIndex != startIndex: + input.seek(startIndex) + + conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True) + if len(conflictingAlts)==1: + if ParserATNSimulator.debug: + print("Full LL avoided") + return min(conflictingAlts) + + if conflictIndex != startIndex: + # restore the index so reporting the fallback to full + # context occurs with the index at the correct spot + input.seek(conflictIndex) + + if ParserATNSimulator.dfa_debug: + print("ctx sensitive state " + str(outerContext) +" in " + str(D)) + fullCtx = True + s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx) + self.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index) + alt = self.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext) + return alt + + if D.isAcceptState: + if D.predicates is None: + return D.prediction + + stopIndex = input.index + input.seek(startIndex) + alts = self.evalSemanticContext(D.predicates, outerContext, True) + if len(alts)==0: + raise self.noViableAlt(input, outerContext, D.configs, startIndex) + elif len(alts)==1: + return min(alts) + else: + # report ambiguity after predicate evaluation to make sure the 
correct + # set of ambig alts is reported. + self.reportAmbiguity(dfa, D, startIndex, stopIndex, False, alts, D.configs) + return min(alts) + + previousD = D + + if t != Token.EOF: + input.consume() + t = input.LA(1) + + # + # Get an existing target state for an edge in the DFA. If the target state + # for the edge has not yet been computed or is otherwise not available, + # this method returns {@code null}. + # + # @param previousD The current DFA state + # @param t The next input symbol + # @return The existing target DFA state for the given input symbol + # {@code t}, or {@code null} if the target state for this edge is not + # already cached + # + def getExistingTargetState(self, previousD:DFAState, t:int): + edges = previousD.edges + if edges is None or t + 1 < 0 or t + 1 >= len(edges): + return None + else: + return edges[t + 1] + + # + # Compute a target state for an edge in the DFA, and attempt to add the + # computed state and corresponding edge to the DFA. + # + # @param dfa The DFA + # @param previousD The current DFA state + # @param t The next input symbol + # + # @return The computed target DFA state for the given input symbol + # {@code t}. If {@code t} does not lead to a valid DFA state, this method + # returns {@link #ERROR}. 
+ # + def computeTargetState(self, dfa:DFA, previousD:DFAState, t:int): + reach = self.computeReachSet(previousD.configs, t, False) + if reach is None: + self.addDFAEdge(dfa, previousD, t, self.ERROR) + return self.ERROR + + # create new target state; we'll add to DFA after it's complete + D = DFAState(configs=reach) + + predictedAlt = self.getUniqueAlt(reach) + + if ParserATNSimulator.debug: + altSubSets = PredictionMode.getConflictingAltSubsets(reach) + print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) + + ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" + + str(PredictionMode.allSubsetsConflict(altSubSets)) + ", conflictingAlts=" + + str(self.getConflictingAlts(reach))) + + if predictedAlt!=ATN.INVALID_ALT_NUMBER: + # NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = True + D.configs.uniqueAlt = predictedAlt + D.prediction = predictedAlt + elif PredictionMode.hasSLLConflictTerminatingPrediction(self.predictionMode, reach): + # MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = self.getConflictingAlts(reach) + D.requiresFullContext = True + # in SLL-only mode, we will stop at this state and return the minimum alt + D.isAcceptState = True + D.prediction = min(D.configs.conflictingAlts) + + if D.isAcceptState and D.configs.hasSemanticContext: + self.predicateDFAState(D, self.atn.getDecisionState(dfa.decision)) + if D.predicates is not None: + D.prediction = ATN.INVALID_ALT_NUMBER + + # all adds to dfa are done after we've created full D state + D = self.addDFAEdge(dfa, previousD, t, D) + return D + + def predicateDFAState(self, dfaState:DFAState, decisionState:DecisionState): + # We need to test all predicates, even in DFA states that + # uniquely predict alternative. 
+ nalts = len(decisionState.transitions) + # Update DFA so reach becomes accept state with (predicate,alt) + # pairs if preds found for conflicting alts + altsToCollectPredsFrom = self.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred = self.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred is not None: + dfaState.predicates = self.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.prediction = ATN.INVALID_ALT_NUMBER # make sure we use preds + else: + # There are preds in configs but they might go away + # when OR'd together like {p}? || NONE == NONE. If neither + # alt has preds, resolve to min alt + dfaState.prediction = min(altsToCollectPredsFrom) + + # comes back with reach.uniqueAlt set to a valid alt + def execATNWithFullContext(self, dfa:DFA, D:DFAState, # how far we got before failing over + s0:ATNConfigSet, + input:TokenStream, + startIndex:int, + outerContext:ParserRuleContext): + if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions: + print("execATNWithFullContext", str(s0)) + fullCtx = True + foundExactAmbig = False + reach = None + previous = s0 + input.seek(startIndex) + t = input.LA(1) + predictedAlt = -1 + while (True): # while more work + reach = self.computeReachSet(previous, t, fullCtx) + if reach is None: + # if any configs in previous dipped into outer context, that + # means that input up to t actually finished entry rule + # at least for LL decision. Full LL doesn't dip into outer + # so don't need special case. + # We will get an error no matter what so delay until after + # decision; better error message. Also, no reachable target + # ATN states in SLL implies LL will also get nowhere. + # If conflict in states that dip out, choose min since we + # will get error no matter what. 
+ e = self.noViableAlt(input, outerContext, previous, startIndex) + input.seek(startIndex) + alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt!=ATN.INVALID_ALT_NUMBER: + return alt + else: + raise e + + altSubSets = PredictionMode.getConflictingAltSubsets(reach) + if ParserATNSimulator.debug: + print("LL altSubSets=" + str(altSubSets) + ", predict=" + + str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + str(PredictionMode.resolvesToJustOneViableAlt(altSubSets))) + + reach.uniqueAlt = self.getUniqueAlt(reach) + # unique prediction? + if reach.uniqueAlt!=ATN.INVALID_ALT_NUMBER: + predictedAlt = reach.uniqueAlt + break + elif self.predictionMode is not PredictionMode.LL_EXACT_AMBIG_DETECTION: + predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATN.INVALID_ALT_NUMBER: + break + else: + # In exact ambiguity mode, we never try to terminate early. + # Just keeps scarfing until we know what the conflict is + if PredictionMode.allSubsetsConflict(altSubSets) and PredictionMode.allSubsetsEqual(altSubSets): + foundExactAmbig = True + predictedAlt = PredictionMode.getSingleViableAlt(altSubSets) + break + # else there are multiple non-conflicting subsets or + # we're not sure what the ambiguity is yet. + # So, keep going. + + previous = reach + if t != Token.EOF: + input.consume() + t = input.LA(1) + + # If the configuration set uniquely predicts an alternative, + # without conflict, then we know that it's a full LL decision + # not SLL. + if reach.uniqueAlt != ATN.INVALID_ALT_NUMBER : + self.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index) + return predictedAlt + + # We do not check predicates here because we have checked them + # on-the-fly when doing full context prediction. 
+ + # + # In non-exact ambiguity detection mode, we might actually be able to + # detect an exact ambiguity, but I'm not going to spend the cycles + # needed to check. We only emit ambiguity warnings in exact ambiguity + # mode. + # + # For example, we might know that we have conflicting configurations. + # But, that does not mean that there is no way forward without a + # conflict. It's possible to have nonconflicting alt subsets as in: + + # altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + # from + # + # [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + # (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + # + # In this case, (17,1,[5 $]) indicates there is some next sequence that + # would resolve this without conflict to alternative 1. Any other viable + # next sequence, however, is associated with a conflict. We stop + # looking for input because no amount of further lookahead will alter + # the fact that we should predict alternative 1. We just can't say for + # sure that there is an ambiguity without looking further. + + self.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, None, reach) + + return predictedAlt + + def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool): + if ParserATNSimulator.debug: + print("in computeReachSet, starting closure: " + str(closure)) + + if self.mergeCache is None: + self.mergeCache = dict() + + intermediate = ATNConfigSet(fullCtx) + + # Configurations already in a rule stop state indicate reaching the end + # of the decision rule (local context) or end of the start rule (full + # context). Once reached, these configurations are never updated by a + # closure operation, so they are handled separately for the performance + # advantage of having a smaller intermediate set when calling closure. 
+ # + # For full-context reach operations, separate handling is required to + # ensure that the alternative matching the longest overall sequence is + # chosen when multiple such configurations can match the input. + + skippedStopStates = None + + # First figure out where we can reach on input t + for c in closure: + if ParserATNSimulator.debug: + print("testing " + self.getTokenName(t) + " at " + str(c)) + + if isinstance(c.state, RuleStopState): + if fullCtx or t == Token.EOF: + if skippedStopStates is None: + skippedStopStates = list() + skippedStopStates.append(c) + continue + + for trans in c.state.transitions: + target = self.getReachableTarget(trans, t) + if target is not None: + intermediate.add(ATNConfig(state=target, config=c), self.mergeCache) + + # Now figure out where the reach operation can take us... + + reach = None + + # This block optimizes the reach operation for intermediate sets which + # trivially indicate a termination state for the overall + # adaptivePredict operation. + # + # The conditions assume that intermediate + # contains all configurations relevant to the reach set, but this + # condition is not true when one or more configurations have been + # withheld in skippedStopStates, or when the current symbol is EOF. + # + if skippedStopStates is None and t!=Token.EOF: + if len(intermediate)==1: + # Don't pursue the closure if there is just one state. + # It can only have one alternative; just add to result + # Also don't pursue the closure if there is unique alternative + # among the configurations. + reach = intermediate + elif self.getUniqueAlt(intermediate)!=ATN.INVALID_ALT_NUMBER: + # Also don't pursue the closure if there is unique alternative + # among the configurations. + reach = intermediate + + # If the reach set could not be trivially determined, perform a closure + # operation on the intermediate set to compute its initial value. 
+ # + if reach is None: + reach = ATNConfigSet(fullCtx) + closureBusy = set() + treatEofAsEpsilon = t == Token.EOF + for c in intermediate: + self.closure(c, reach, closureBusy, False, fullCtx, treatEofAsEpsilon) + + if t == Token.EOF: + # After consuming EOF no additional input is possible, so we are + # only interested in configurations which reached the end of the + # decision rule (local context) or end of the start rule (full + # context). Update reach to contain only these configurations. This + # handles both explicit EOF transitions in the grammar and implicit + # EOF transitions following the end of the decision or start rule. + # + # When reach==intermediate, no closure operation was performed. In + # this case, removeAllConfigsNotInRuleStopState needs to check for + # reachable rule stop states as well as configurations already in + # a rule stop state. + # + # This is handled before the configurations in skippedStopStates, + # because any configurations potentially added from that list are + # already guaranteed to meet this condition whether or not it's + # required. + # + reach = self.removeAllConfigsNotInRuleStopState(reach, reach is intermediate) + + # If skippedStopStates is not null, then it contains at least one + # configuration. For full-context reach operations, these + # configurations reached the end of the start rule, in which case we + # only add them back to reach if no configuration during the current + # closure operation reached such a state. This ensures adaptivePredict + # chooses an alternative matching the longest overall sequence when + # multiple alternatives are viable. 
+ # + if skippedStopStates is not None and ( (not fullCtx) or (not PredictionMode.hasConfigInRuleStopState(reach))): + for c in skippedStopStates: + reach.add(c, self.mergeCache) + if len(reach)==0: + return None + else: + return reach + + # + # Return a configuration set containing only the configurations from + # {@code configs} which are in a {@link RuleStopState}. If all + # configurations in {@code configs} are already in a rule stop state, this + # method simply returns {@code configs}. + # + #When {@code lookToEndOfRule} is true, this method uses + # {@link ATN#nextTokens} for each configuration in {@code configs} which is + # not already in a rule stop state to see if a rule stop state is reachable + # from the configuration via epsilon-only transitions.
+ # + # @param configs the configuration set to update + # @param lookToEndOfRule when true, this method checks for rule stop states + # reachable by epsilon-only transitions from each configuration in + # {@code configs}. + # + # @return {@code configs} if all configurations in {@code configs} are in a + # rule stop state, otherwise return a new configuration set containing only + # the configurations from {@code configs} which are in a rule stop state + # + def removeAllConfigsNotInRuleStopState(self, configs:ATNConfigSet, lookToEndOfRule:bool): + if PredictionMode.allConfigsInRuleStopStates(configs): + return configs + result = ATNConfigSet(configs.fullCtx) + for config in configs: + if isinstance(config.state, RuleStopState): + result.add(config, self.mergeCache) + continue + if lookToEndOfRule and config.state.epsilonOnlyTransitions: + nextTokens = self.atn.nextTokens(config.state) + if Token.EPSILON in nextTokens: + endOfRuleState = self.atn.ruleToStopState[config.state.ruleIndex] + result.add(ATNConfig(state=endOfRuleState, config=config), self.mergeCache) + return result + + def computeStartState(self, p:ATNState, ctx:RuleContext, fullCtx:bool): + # always at least the implicit call to start rule + initialContext = PredictionContextFromRuleContext(self.atn, ctx) + configs = ATNConfigSet(fullCtx) + + for i in range(0, len(p.transitions)): + target = p.transitions[i].target + c = ATNConfig(target, i+1, initialContext) + closureBusy = set() + self.closure(c, configs, closureBusy, True, fullCtx, False) + return configs + + # + # This method transforms the start state computed by + # {@link #computeStartState} to the special start state used by a + # precedence DFA for a particular precedence value. The transformation + # process applies the following changes to the start state's configuration + # set. + # + #+ # The prediction context must be considered by this filter to address + # situations like the following. + #
+ #
+ #
+ # grammar TA;
+ # prog: statement* EOF;
+ # statement: letterA | statement letterA 'b' ;
+ # letterA: 'a';
+ #
+ #
+ # + # If the above grammar, the ATN state immediately before the token + # reference {@code 'a'} in {@code letterA} is reachable from the left edge + # of both the primary and closure blocks of the left-recursive rule + # {@code statement}. The prediction context associated with each of these + # configurations distinguishes between them, and prevents the alternative + # which stepped out to {@code prog} (and then back in to {@code statement} + # from being eliminated by the filter. + #
+ # + # @param configs The configuration set computed by + # {@link #computeStartState} as the start state for the DFA. + # @return The transformed configuration set representing the start state + # for a precedence DFA at a particular precedence level (determined by + # calling {@link Parser#getPrecedence}). + # + def applyPrecedenceFilter(self, configs:ATNConfigSet): + statesFromAlt1 = dict() + configSet = ATNConfigSet(configs.fullCtx) + for config in configs: + # handle alt 1 first + if config.alt != 1: + continue + updatedContext = config.semanticContext.evalPrecedence(self.parser, self._outerContext) + if updatedContext is None: + # the configuration was eliminated + continue + + statesFromAlt1[config.state.stateNumber] = config.context + if updatedContext is not config.semanticContext: + configSet.add(ATNConfig(config=config, semantic=updatedContext), self.mergeCache) + else: + configSet.add(config, self.mergeCache) + + for config in configs: + if config.alt == 1: + # already handled + continue + + # In the future, this elimination step could be updated to also + # filter the prediction context for alternatives predicting alt>1 + # (basically a graph subtraction algorithm). + # + if not config.precedenceFilterSuppressed: + context = statesFromAlt1.get(config.state.stateNumber, None) + if context==config.context: + # eliminated + continue + + configSet.add(config, self.mergeCache) + + return configSet + + def getReachableTarget(self, trans:Transition, ttype:int): + if trans.matches(ttype, 0, self.atn.maxTokenType): + return trans.target + else: + return None + + def getPredsForAmbigAlts(self, ambigAlts:set, configs:ATNConfigSet, nalts:int): + # REACH=[1|1|[]|0:0, 1|2|[]|0:1] + # altToPred starts as an array of all null contexts. The entry at index i + # corresponds to alternative i. altToPred[i] may have one of three values: + # 1. null: no ATNConfig c is found such that c.alt==i + # 2. 
SemanticContext.NONE: At least one ATNConfig c exists such that + # c.alt==i and c.semanticContext==SemanticContext.NONE. In other words, + # alt i has at least one unpredicated config. + # 3. Non-NONE Semantic Context: There exists at least one, and for all + # ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE. + # + # From this, it is clear that NONE||anything==NONE. + # + altToPred = [None] * (nalts + 1) + for c in configs: + if c.alt in ambigAlts: + altToPred[c.alt] = orContext(altToPred[c.alt], c.semanticContext) + + nPredAlts = 0 + for i in range(1, nalts+1): + if altToPred[i] is None: + altToPred[i] = SemanticContext.NONE + elif altToPred[i] is not SemanticContext.NONE: + nPredAlts += 1 + + # nonambig alts are null in altToPred + if nPredAlts==0: + altToPred = None + if ParserATNSimulator.debug: + print("getPredsForAmbigAlts result " + str_list(altToPred)) + return altToPred + + def getPredicatePredictions(self, ambigAlts:set, altToPred:list): + pairs = [] + containsPredicate = False + for i in range(1, len(altToPred)): + pred = altToPred[i] + # unpredicated is indicated by SemanticContext.NONE + if ambigAlts is not None and i in ambigAlts: + pairs.append(PredPrediction(pred, i)) + if pred is not SemanticContext.NONE: + containsPredicate = True + + if not containsPredicate: + return None + + return pairs + + # + # This method is used to improve the localization of error messages by + # choosing an alternative rather than throwing a + # {@link NoViableAltException} in particular prediction scenarios where the + # {@link #ERROR} state was reached during ATN simulation. + # + #+ # The default implementation of this method uses the following + # algorithm to identify an ATN configuration which successfully parsed the + # decision entry rule. 
Choosing such an alternative ensures that the + # {@link ParserRuleContext} returned by the calling rule will be complete + # and valid, and the syntax error will be reported later at a more + # localized location.
+ # + #+ # In some scenarios, the algorithm described above could predict an + # alternative which will result in a {@link FailedPredicateException} in + # the parser. Specifically, this could occur if the only configuration + # capable of successfully parsing to the end of the decision rule is + # blocked by a semantic predicate. By choosing this alternative within + # {@link #adaptivePredict} instead of throwing a + # {@link NoViableAltException}, the resulting + # {@link FailedPredicateException} in the parser will identify the specific + # predicate which is preventing the parser from successfully parsing the + # decision rule, which helps developers identify and correct logic errors + # in semantic predicates. + #
+ # + # @param configs The ATN configurations which were valid immediately before + # the {@link #ERROR} state was reached + # @param outerContext The is the \gamma_0 initial parser context from the paper + # or the parser stack at the instant before prediction commences. + # + # @return The value to return from {@link #adaptivePredict}, or + # {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not + # identified and {@link #adaptivePredict} should report an error instead. + # + def getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet, outerContext:ParserRuleContext): + semValidConfigs, semInvalidConfigs = self.splitAccordingToSemanticValidity(configs, outerContext) + alt = self.getAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt!=ATN.INVALID_ALT_NUMBER: # semantically/syntactically viable path exists + return alt + # Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs)>0: + alt = self.getAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt!=ATN.INVALID_ALT_NUMBER: # syntactically viable path exists + return alt + return ATN.INVALID_ALT_NUMBER + + def getAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet): + alts = set() + for c in configs: + if c.reachesIntoOuterContext>0 or (isinstance(c.state, RuleStopState) and c.context.hasEmptyPath() ): + alts.add(c.alt) + if len(alts)==0: + return ATN.INVALID_ALT_NUMBER + else: + return min(alts) + + # Walk the list of configurations and split them according to + # those that have preds evaluating to true/false. If no pred, assume + # true pred and include in succeeded set. Returns Pair of sets. + # + # Create a new set so as not to alter the incoming parameter. + # + # Assumption: the input stream has been restored to the starting point + # prediction, which is where predicates need to evaluate. 
+ # + def splitAccordingToSemanticValidity(self, configs:ATNConfigSet, outerContext:ParserRuleContext): + succeeded = ATNConfigSet(configs.fullCtx) + failed = ATNConfigSet(configs.fullCtx) + for c in configs: + if c.semanticContext is not SemanticContext.NONE: + predicateEvaluationResult = c.semanticContext.eval(self.parser, outerContext) + if predicateEvaluationResult: + succeeded.add(c) + else: + failed.add(c) + else: + succeeded.add(c) + return (succeeded,failed) + + # Look through a list of predicate/alt pairs, returning alts for the + # pairs that win. A {@code NONE} predicate indicates an alt containing an + # unpredicated config which behaves as "always true." If !complete + # then we stop at the first predicate that evaluates to true. This + # includes pairs with null predicates. + # + def evalSemanticContext(self, predPredictions:list, outerContext:ParserRuleContext, complete:bool): + predictions = set() + for pair in predPredictions: + if pair.pred is SemanticContext.NONE: + predictions.add(pair.alt) + if not complete: + break + continue + predicateEvaluationResult = pair.pred.eval(self.parser, outerContext) + if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug: + print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult)) + + if predicateEvaluationResult: + if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug: + print("PREDICT " + str(pair.alt)) + predictions.add(pair.alt) + if not complete: + break + return predictions + + + # TODO: If we are doing predicates, there is no point in pursuing + # closure operations if we reach a DFA state that uniquely predicts + # alternative. We will not be caching that DFA state and it is a + # waste to pursue the closure. 
Might have to advance when we do + # ambig detection thought :( + # + + def closure(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, treatEofAsEpsilon:bool): + initialDepth = 0 + self.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEofAsEpsilon) + + + def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool): + if ParserATNSimulator.debug: + print("closure(" + str(config) + ")") + + if isinstance(config.state, RuleStopState): + # We hit rule end. If we have context info, use it + # run thru all possible stack tops in ctx + if not config.context.isEmpty(): + for i in range(0, len(config.context)): + state = config.context.getReturnState(i) + if state is PredictionContext.EMPTY_RETURN_STATE: + if fullCtx: + configs.add(ATNConfig(state=config.state, context=PredictionContext.EMPTY, config=config), self.mergeCache) + continue + else: + # we have no context info, just chase follow links (if greedy) + if ParserATNSimulator.debug: + print("FALLING off rule " + self.getRuleName(config.state.ruleIndex)) + self.closure_(config, configs, closureBusy, collectPredicates, + fullCtx, depth, treatEofAsEpsilon) + continue + returnState = self.atn.states[state] + newContext = config.context.getParent(i) # "pop" return state + c = ATNConfig(state=returnState, alt=config.alt, context=newContext, semantic=config.semanticContext) + # While we have context to pop back from, we may have + # gotten that context AFTER having falling off a rule. + # Make sure we track that we are now out of context. 
+ c.reachesIntoOuterContext = config.reachesIntoOuterContext + self.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon) + return + elif fullCtx: + # reached end of start rule + configs.add(config, self.mergeCache) + return + else: + # else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulator.debug: + print("FALLING off rule " + self.getRuleName(config.state.ruleIndex)) + + self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon) + + # Do the actual work of walking epsilon edges# + def closure_(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool): + p = config.state + # optimization + if not p.epsilonOnlyTransitions: + configs.add(config, self.mergeCache) + # make sure to not return here, because EOF transitions can act as + # both epsilon transitions and non-epsilon transitions. + + first = True + for t in p.transitions: + if first: + first = False + if self.canDropLoopEntryEdgeInLeftRecursiveRule(config): + continue + + continueCollecting = collectPredicates and not isinstance(t, ActionTransition) + c = self.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon) + if c is not None: + newDepth = depth + if isinstance( config.state, RuleStopState): + # target fell off end of rule; mark resulting c as having dipped into outer context + # We can't get here if incoming config was rule stop and we had context + # track how far we dip into outer context. Might + # come in handy and we avoid evaluating context dependent + # preds if this is > 0. 
+ if self._dfa is not None and self._dfa.precedenceDfa: + if t.outermostPrecedenceReturn == self._dfa.atnStartState.ruleIndex: + c.precedenceFilterSuppressed = True + c.reachesIntoOuterContext += 1 + if c in closureBusy: + # avoid infinite recursion for right-recursive rules + continue + closureBusy.add(c) + configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method + newDepth -= 1 + if ParserATNSimulator.debug: + print("dips into outer ctx: " + str(c)) + else: + if not t.isEpsilon: + if c in closureBusy: + # avoid infinite recursion for EOF* and EOF+ + continue + closureBusy.add(c) + if isinstance(t, RuleTransition): + # latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0: + newDepth += 1 + + self.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon) + + + + # Implements first-edge (loop entry) elimination as an optimization + # during closure operations. See antlr/antlr4#1398. + # + # The optimization is to avoid adding the loop entry config when + # the exit path can only lead back to the same + # StarLoopEntryState after popping context at the rule end state + # (traversing only epsilon edges, so we're still in closure, in + # this same rule). + # + # We need to detect any state that can reach loop entry on + # epsilon w/o exiting rule. We don't have to look at FOLLOW + # links, just ensure that all stack tops for config refer to key + # states in LR rule. + # + # To verify we are in the right situation we must first check + # closure is at a StarLoopEntryState generated during LR removal. + # Then we check that each stack top of context is a return state + # from one of these cases: + # + # 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state + # 2. expr op expr. The return state is the block end of internal block of (...)* + # 3. 'between' expr 'and' expr. 
The return state of 2nd expr reference. + # That state points at block end of internal block of (...)*. + # 4. expr '?' expr ':' expr. The return state points at block end, + # which points at loop entry state. + # + # If any is true for each stack top, then closure does not add a + # config to the current config set for edge[0], the loop entry branch. + # + # Conditions fail if any context for the current config is: + # + # a. empty (we'd fall out of expr to do a global FOLLOW which could + # even be to some weird spot in expr) or, + # b. lies outside of expr or, + # c. lies within expr but at a state not the BlockEndState + # generated during LR removal + # + # Do we need to evaluate predicates ever in closure for this case? + # + # No. Predicates, including precedence predicates, are only + # evaluated when computing a DFA start state. I.e., only before + # the lookahead (but not parser) consumes a token. + # + # There are no epsilon edges allowed in LR rule alt blocks or in + # the "primary" part (ID here). If closure is in + # StarLoopEntryState any lookahead operation will have consumed a + # token as there are no epsilon-paths that lead to + # StarLoopEntryState. We do not have to evaluate predicates + # therefore if we are in the generated StarLoopEntryState of a LR + # rule. Note that when making a prediction starting at that + # decision point, decision d=2, compute-start-state performs + # closure starting at edges[0], edges[1] emanating from + # StarLoopEntryState. That means it is not performing closure on + # StarLoopEntryState during compute-start-state. + # + # How do we know this always gives same prediction answer? + # + # Without predicates, loop entry and exit paths are ambiguous + # upon remaining input +b (in, say, a+b). Either paths lead to + # valid parses. Closure can lead to consuming + immediately or by + # falling out of this call to expr back into expr and loop back + # again to StarLoopEntryState to match +b. 
In this special case, + # we choose the more efficient path, which is to take the bypass + # path. + # + # The lookahead language has not changed because closure chooses + # one path over the other. Both paths lead to consuming the same + # remaining input during a lookahead operation. If the next token + # is an operator, lookahead will enter the choice block with + # operators. If it is not, lookahead will exit expr. Same as if + # closure had chosen to enter the choice block immediately. + # + # Closure is examining one config (some loopentrystate, some alt, + # context) which means it is considering exactly one alt. Closure + # always copies the same alt to any derived configs. + # + # How do we know this optimization doesn't mess up precedence in + # our parse trees? + # + # Looking through expr from left edge of stat only has to confirm + # that an input, say, a+b+c; begins with any valid interpretation + # of an expression. The precedence actually doesn't matter when + # making a decision in stat seeing through expr. It is only when + # parsing rule expr that we must use the precedence to get the + # right interpretation and, hence, parse tree. + # + # @since 4.6 + # + def canDropLoopEntryEdgeInLeftRecursiveRule(self, config): + # return False + p = config.state + # First check to see if we are in StarLoopEntryState generated during + # left-recursion elimination. For efficiency, also check if + # the context has an empty stack case. If so, it would mean + # global FOLLOW so we can't perform optimization + # Are we the special loop entry/exit state? or SLL wildcard + if p.stateType != ATNState.STAR_LOOP_ENTRY \ + or not p.isPrecedenceDecision \ + or config.context.isEmpty() \ + or config.context.hasEmptyPath(): + return False + + # Require all return states to return back to the same rule + # that p is in. 
+ numCtxs = len(config.context) + for i in range(0, numCtxs): # for each stack context + returnState = self.atn.states[config.context.getReturnState(i)] + if returnState.ruleIndex != p.ruleIndex: + return False + + decisionStartState = p.transitions[0].target + blockEndStateNum = decisionStartState.endState.stateNumber + blockEndState = self.atn.states[blockEndStateNum] + + # Verify that the top of each stack context leads to loop entry/exit + # state through epsilon edges and w/o leaving rule. + for i in range(0, numCtxs): # for each stack context + returnStateNumber = config.context.getReturnState(i) + returnState = self.atn.states[returnStateNumber] + # all states must have single outgoing epsilon edge + if len(returnState.transitions) != 1 or not returnState.transitions[0].isEpsilon: + return False + + # Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget = returnState.transitions[0].target + if returnState.stateType == ATNState.BLOCK_END and returnStateTarget is p: + continue + + # Look for 'expr op expr' or case where expr's return state is block end + # of (...)* internal block; the block end points to loop back + # which points to p but we don't need to check that + if returnState is blockEndState: + continue + + # Look for ternary expr ? expr : expr. 
The return state points at block end, + # which points at loop entry state + if returnStateTarget is blockEndState: + continue + + # Look for complex prefix 'between expr and expr' case where 2nd expr's + # return state points at block end state of (...)* internal block + if returnStateTarget.stateType == ATNState.BLOCK_END \ + and len(returnStateTarget.transitions) == 1 \ + and returnStateTarget.transitions[0].isEpsilon \ + and returnStateTarget.transitions[0].target is p: + continue + + # anything else ain't conforming + return False + + return True + + + def getRuleName(self, index:int): + if self.parser is not None and index>=0: + return self.parser.ruleNames[index] + else: + return "If {@code to} is {@code null}, this method returns {@code null}. + # Otherwise, this method returns the {@link DFAState} returned by calling + # {@link #addDFAState} for the {@code to} state.
+ # + # @param dfa The DFA + # @param from The source state for the edge + # @param t The input symbol + # @param to The target state for the edge + # + # @return If {@code to} is {@code null}, this method returns {@code null}; + # otherwise this method returns the result of calling {@link #addDFAState} + # on {@code to} + # + def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState): + if ParserATNSimulator.debug: + print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t)) + + if to is None: + return None + + to = self.addDFAState(dfa, to) # used existing if possible not incoming + if from_ is None or t < -1 or t > self.atn.maxTokenType: + return to + + if from_.edges is None: + from_.edges = [None] * (self.atn.maxTokenType + 2) + from_.edges[t+1] = to # connect + + if ParserATNSimulator.debug: + names = None if self.parser is None else self.parser.literalNames + print("DFA=\n" + dfa.toString(names)) + + return to + + # + # Add state {@code D} to the DFA if it is not already present, and return + # the actual instance stored in the DFA. If a state equivalent to {@code D} + # is already in the DFA, the existing state is returned. Otherwise this + # method returns {@code D} after adding it to the DFA. + # + #If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and + # does not change the DFA.
+ # + # @param dfa The dfa + # @param D The DFA state to add + # @return The state stored in the DFA. This will be either the existing + # state if {@code D} is already in the DFA, or {@code D} itself if the + # state was not already present. + # + def addDFAState(self, dfa:DFA, D:DFAState): + if D is self.ERROR: + return D + + + existing = dfa.states.get(D, None) + if existing is not None: + return existing + + D.stateNumber = len(dfa.states) + if not D.configs.readonly: + D.configs.optimizeConfigs(self) + D.configs.setReadonly(True) + dfa.states[D] = D + if ParserATNSimulator.debug: + print("adding new DFA state: " + str(D)) + return D + + def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int): + if ParserATNSimulator.debug or ParserATNSimulator.retry_debug: + print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) + + ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex)) + if self.parser is not None: + self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + + def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int): + if ParserATNSimulator.debug or ParserATNSimulator.retry_debug: + print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) + + ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex)) + if self.parser is not None: + self.parser.getErrorListenerDispatch().reportContextSensitivity(self.parser, dfa, startIndex, stopIndex, prediction, configs) + + # If context sensitive parsing, we know it's ambiguity not conflict# + def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int, + exact:bool, ambigAlts:set, configs:ATNConfigSet ): + if ParserATNSimulator.debug or ParserATNSimulator.retry_debug: +# ParserATNPathFinder finder = new 
ParserATNPathFinder(parser, atn); +# int i = 1; +# for (Transition t : dfa.atnStartState.transitions) { +# print("ALT "+i+"="); +# print(startIndex+".."+stopIndex+", len(input)="+parser.getInputStream().size()); +# TraceTree path = finder.trace(t.target, parser.getContext(), (TokenStream)parser.getInputStream(), +# startIndex, stopIndex); +# if ( path!=null ) { +# print("path = "+path.toStringTree()); +# for (TraceTree leaf : path.leaves) { +# List+ # When using this prediction mode, the parser will either return a correct + # parse tree (i.e. the same parse tree that would be returned with the + # {@link #LL} prediction mode), or it will report a syntax error. If a + # syntax error is encountered when using the {@link #SLL} prediction mode, + # it may be due to either an actual syntax error in the input or indicate + # that the particular combination of grammar and input requires the more + # powerful {@link #LL} prediction abilities to complete successfully.
+ # + #+ # This prediction mode does not provide any guarantees for prediction + # behavior for syntactically-incorrect inputs.
+ # + SLL = 0 + # + # The LL(*) prediction mode. This prediction mode allows the current parser + # context to be used for resolving SLL conflicts that occur during + # prediction. This is the fastest prediction mode that guarantees correct + # parse results for all combinations of grammars with syntactically correct + # inputs. + # + #+ # When using this prediction mode, the parser will make correct decisions + # for all syntactically-correct grammar and input combinations. However, in + # cases where the grammar is truly ambiguous this prediction mode might not + # report a precise answer for exactly which alternatives are + # ambiguous.
+ # + #+ # This prediction mode does not provide any guarantees for prediction + # behavior for syntactically-incorrect inputs.
+ # + LL = 1 + # + # The LL(*) prediction mode with exact ambiguity detection. In addition to + # the correctness guarantees provided by the {@link #LL} prediction mode, + # this prediction mode instructs the prediction algorithm to determine the + # complete and exact set of ambiguous alternatives for every ambiguous + # decision encountered while parsing. + # + #+ # This prediction mode may be used for diagnosing ambiguities during + # grammar development. Due to the performance overhead of calculating sets + # of ambiguous alternatives, this prediction mode should be avoided when + # the exact results are not necessary.
+ # + #+ # This prediction mode does not provide any guarantees for prediction + # behavior for syntactically-incorrect inputs.
+ # + LL_EXACT_AMBIG_DETECTION = 2 + + + # + # Computes the SLL prediction termination condition. + # + #+ # This method computes the SLL prediction termination condition for both of + # the following cases.
+ # + #COMBINED SLL+LL PARSING
+ # + #When LL-fallback is enabled upon SLL conflict, correct predictions are + # ensured regardless of how the termination condition is computed by this + # method. Due to the substantially higher cost of LL prediction, the + # prediction should only fall back to LL when the additional lookahead + # cannot lead to a unique SLL prediction.
+ # + #Assuming combined SLL+LL parsing, an SLL configuration set with only + # conflicting subsets should fall back to full LL, even if the + # configuration sets don't resolve to the same alternative (e.g. + # {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting + # configuration, SLL could continue with the hopes that more lookahead will + # resolve via one of those non-conflicting configurations.
+ # + #Here's the prediction termination rule them: SLL (for SLL+LL parsing) + # stops when it sees only conflicting configuration subsets. In contrast, + # full LL keeps going when there is uncertainty.
+ # + #HEURISTIC
+ # + #As a heuristic, we stop prediction when we see any conflicting subset + # unless we see a state that only has one alternative associated with it. + # The single-alt-state thing lets prediction continue upon rules like + # (otherwise, it would admit defeat too soon):
+ # + #{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}
+ # + #When the ATN simulation reaches the state before {@code ';'}, it has a + # DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally + # {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop + # processing this node because alternative to has another way to continue, + # via {@code [6|2|[]]}.
+ # + #It also let's us continue for this rule:
+ # + #{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}
+ # + #After matching input A, we reach the stop state for rule A, state 1. + # State 8 is the state right before B. Clearly alternatives 1 and 2 + # conflict and no amount of further lookahead will separate the two. + # However, alternative 3 will be able to continue and so we do not stop + # working on this state. In the previous example, we're concerned with + # states associated with the conflicting alternatives. Here alt 3 is not + # associated with the conflicting configs, but since we can continue + # looking for input reasonably, don't declare the state done.
+ # + #PURE SLL PARSING
+ # + #To handle pure SLL parsing, all we have to do is make sure that we + # combine stack contexts for configurations that differ only by semantic + # predicate. From there, we can do the usual SLL termination heuristic.
+ # + #PREDICATES IN SLL+LL PARSING
+ # + #SLL decisions don't evaluate predicates until after they reach DFA stop + # states because they need to create the DFA cache that works in all + # semantic situations. In contrast, full LL evaluates predicates collected + # during start state computation so it can ignore predicates thereafter. + # This means that SLL termination detection can totally ignore semantic + # predicates.
+ # + #Implementation-wise, {@link ATNConfigSet} combines stack contexts but not + # semantic predicate contexts so we might see two configurations like the + # following.
+ # + #{@code (s, 1, x, {}), (s, 1, x', {p})}
+ # + #Before testing these configurations against others, we have to merge + # {@code x} and {@code x'} (without modifying the existing configurations). + # For example, we test {@code (x+x')==x''} when looking for conflicts in + # the following configurations.
+ # + #{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+ # + #If the configuration set has predicates (as indicated by + # {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of + # the configurations to strip out all of the predicates so that a standard + # {@link ATNConfigSet} will merge everything ignoring predicates.
+ # + @classmethod + def hasSLLConflictTerminatingPrediction(cls, mode:PredictionMode, configs:ATNConfigSet): + # Configs in rule stop states indicate reaching the end of the decision + # rule (local context) or end of start rule (full context). If all + # configs meet this condition, then none of the configurations is able + # to match additional input so we terminate prediction. + # + if cls.allConfigsInRuleStopStates(configs): + return True + + # pure SLL mode parsing + if mode == PredictionMode.SLL: + # Don't bother with combining configs from different semantic + # contexts if we can fail over to full LL; costs more time + # since we'll often fail over anyway. + if configs.hasSemanticContext: + # dup configs, tossing out semantic predicates + dup = ATNConfigSet() + for c in configs: + c = ATNConfig(config=c, semantic=SemanticContext.NONE) + dup.add(c) + configs = dup + # now we have combined contexts for configs with dissimilar preds + + # pure SLL or combined SLL+LL mode parsing + altsets = cls.getConflictingAltSubsets(configs) + return cls.hasConflictingAltSet(altsets) and not cls.hasStateAssociatedWithOneAlt(configs) + + # Checks if any configuration in {@code configs} is in a + # {@link RuleStopState}. Configurations meeting this condition have reached + # the end of the decision rule (local context) or end of start rule (full + # context). + # + # @param configs the configuration set to test + # @return {@code true} if any configuration in {@code configs} is in a + # {@link RuleStopState}, otherwise {@code false} + @classmethod + def hasConfigInRuleStopState(cls, configs:ATNConfigSet): + return any(isinstance(cfg.state, RuleStopState) for cfg in configs) + + # Checks if all configurations in {@code configs} are in a + # {@link RuleStopState}. Configurations meeting this condition have reached + # the end of the decision rule (local context) or end of start rule (full + # context). 
+ # + # @param configs the configuration set to test + # @return {@code true} if all configurations in {@code configs} are in a + # {@link RuleStopState}, otherwise {@code false} + @classmethod + def allConfigsInRuleStopStates(cls, configs:ATNConfigSet): + return all(isinstance(cfg.state, RuleStopState) for cfg in configs) + + # + # Full LL prediction termination. + # + #Can we stop looking ahead during ATN simulation or is there some + # uncertainty as to which alternative we will ultimately pick, after + # consuming more input? Even if there are partial conflicts, we might know + # that everything is going to resolve to the same minimum alternative. That + # means we can stop since no more lookahead will change that fact. On the + # other hand, there might be multiple conflicts that resolve to different + # minimums. That means we need more look ahead to decide which of those + # alternatives we should predict.
+ # + #The basic idea is to split the set of configurations {@code C}, into + # conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with + # non-conflicting configurations. Two configurations conflict if they have + # identical {@link ATNConfig#state} and {@link ATNConfig#context} values + # but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)} + # and {@code (s, j, ctx, _)} for {@code i!=j}.
+ # + #Reduce these configuration subsets to the set of possible alternatives. + # You can compute the alternative subsets in one pass as follows:
+ # + #{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in + # {@code C} holding {@code s} and {@code ctx} fixed.
+ # + #Or in pseudo-code, for each configuration {@code c} in {@code C}:
+ # + #
+ # map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
+ # alt and not pred
+ #
+ #
+ # The values in {@code map} are the set of {@code A_s,ctx} sets.
+ # + #If {@code |A_s,ctx|=1} then there is no conflict associated with + # {@code s} and {@code ctx}.
+ # + #Reduce the subsets to singletons by choosing a minimum of each subset. If + # the union of these alternative subsets is a singleton, then no amount of + # more lookahead will help us. We will always pick that alternative. If, + # however, there is more than one alternative, then we are uncertain which + # alternative to predict and must continue looking for resolution. We may + # or may not discover an ambiguity in the future, even if there are no + # conflicting subsets this round.
+ # + #The biggest sin is to terminate early because it means we've made a + # decision but were uncertain as to the eventual outcome. We haven't used + # enough lookahead. On the other hand, announcing a conflict too late is no + # big deal; you will still have the conflict. It's just inefficient. It + # might even look until the end of file.
+ # + #No special consideration for semantic predicates is required because + # predicates are evaluated on-the-fly for full LL prediction, ensuring that + # no configuration contains a semantic context during the termination + # check.
+ # + #CONFLICTING CONFIGS
+ # + #Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict + # when {@code i!=j} but {@code x=x'}. Because we merge all + # {@code (s, i, _)} configurations together, that means that there are at + # most {@code n} configurations associated with state {@code s} for + # {@code n} possible alternatives in the decision. The merged stacks + # complicate the comparison of configuration contexts {@code x} and + # {@code x'}. Sam checks to see if one is a subset of the other by calling + # merge and checking to see if the merged result is either {@code x} or + # {@code x'}. If the {@code x} associated with lowest alternative {@code i} + # is the superset, then {@code i} is the only possible prediction since the + # others resolve to {@code min(i)} as well. However, if {@code x} is + # associated with {@code j>i} then at least one stack configuration for + # {@code j} is not in conflict with alternative {@code i}. The algorithm + # should keep going, looking for more lookahead due to the uncertainty.
+ # + #For simplicity, I'm doing a equality check between {@code x} and + # {@code x'} that lets the algorithm continue to consume lookahead longer + # than necessary. The reason I like the equality is of course the + # simplicity but also because that is the test you need to detect the + # alternatives that are actually in conflict.
+ # + #CONTINUE/STOP RULE
+ # + #Continue if union of resolved alternative sets from non-conflicting and + # conflicting alternative subsets has more than one alternative. We are + # uncertain about which alternative to predict.
+ # + #The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which + # alternatives are still in the running for the amount of input we've + # consumed at this point. The conflicting sets let us to strip away + # configurations that won't lead to more states because we resolve + # conflicts to the configuration with a minimum alternate for the + # conflicting set.
+ # + #CASES
+ # + #EXACT AMBIGUITY DETECTION
+ # + #If all states report the same conflicting set of alternatives, then we + # know we have the exact ambiguity set.
+ # + #|A_i|>1 and
+ # A_i = A_j for all i, j.
In other words, we continue examining lookahead until all {@code A_i} + # have more than one alternative and all {@code A_i} are the same. If + # {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate + # because the resolved set is {@code {1}}. To determine what the real + # ambiguity is, we have to know whether the ambiguity is between one and + # two or one and three so we keep going. We can only stop prediction when + # we need exact ambiguity detection when the sets look like + # {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+ # + @classmethod + def resolvesToJustOneViableAlt(cls, altsets:list): + return cls.getSingleViableAlt(altsets) + + # + # Determines if every alternative subset in {@code altsets} contains more + # than one alternative. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if every {@link BitSet} in {@code altsets} has + # {@link BitSet#cardinality cardinality} > 1, otherwise {@code false} + # + @classmethod + def allSubsetsConflict(cls, altsets:list): + return not cls.hasNonConflictingAltSet(altsets) + + # + # Determines if any single alternative subset in {@code altsets} contains + # exactly one alternative. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if {@code altsets} contains a {@link BitSet} with + # {@link BitSet#cardinality cardinality} 1, otherwise {@code false} + # + @classmethod + def hasNonConflictingAltSet(cls, altsets:list): + return any(len(alts) == 1 for alts in altsets) + + # + # Determines if any single alternative subset in {@code altsets} contains + # more than one alternative. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if {@code altsets} contains a {@link BitSet} with + # {@link BitSet#cardinality cardinality} > 1, otherwise {@code false} + # + @classmethod + def hasConflictingAltSet(cls, altsets:list): + return any(len(alts) > 1 for alts in altsets) + + # + # Determines if every alternative subset in {@code altsets} is equivalent. + # + # @param altsets a collection of alternative subsets + # @return {@code true} if every member of {@code altsets} is equal to the + # others, otherwise {@code false} + # + @classmethod + def allSubsetsEqual(cls, altsets:list): + if not altsets: + return True + first = next(iter(altsets)) + return all(alts == first for alts in iter(altsets)) + + # + # Returns the unique alternative predicted by all alternative subsets in + # {@code altsets}. 
If no such alternative exists, this method returns + # {@link ATN#INVALID_ALT_NUMBER}. + # + # @param altsets a collection of alternative subsets + # + @classmethod + def getUniqueAlt(cls, altsets:list): + all = cls.getAlts(altsets) + if len(all)==1: + return next(iter(all)) + return ATN.INVALID_ALT_NUMBER + + # Gets the complete set of represented alternatives for a collection of + # alternative subsets. This method returns the union of each {@link BitSet} + # in {@code altsets}. + # + # @param altsets a collection of alternative subsets + # @return the set of represented alternatives in {@code altsets} + # + @classmethod + def getAlts(cls, altsets:list): + return set.union(*altsets) + + # + # This function gets the conflicting alt subsets from a configuration set. + # For each configuration {@code c} in {@code configs}: + # + #
+ # map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
+ # alt and not pred
+ #
+ #
+ @classmethod
+ def getConflictingAltSubsets(cls, configs:ATNConfigSet):
+ configToAlts = dict()
+ for c in configs:
+ h = hash((c.state.stateNumber, c.context))
+ alts = configToAlts.get(h, None)
+ if alts is None:
+ alts = set()
+ configToAlts[h] = alts
+ alts.add(c.alt)
+ return configToAlts.values()
+
+ #
+ # Get a map from state to alt subset from a configuration set. For each
+ # configuration {@code c} in {@code configs}:
+ #
+ #
+ # map[c.{@link ATNConfig#state state}] U= c.{@link ATNConfig#alt alt}
+ #
+ #
+ @classmethod
+ def getStateToAltMap(cls, configs:ATNConfigSet):
+ m = dict()
+ for c in configs:
+ alts = m.get(c.state, None)
+ if alts is None:
+ alts = set()
+ m[c.state] = alts
+ alts.add(c.alt)
+ return m
+
+ @classmethod
+ def hasStateAssociatedWithOneAlt(cls, configs:ATNConfigSet):
+ return any(len(alts) == 1 for alts in cls.getStateToAltMap(configs).values())
+
+ @classmethod
+ def getSingleViableAlt(cls, altsets:list):
+ viableAlts = set()
+ for alts in altsets:
+ minAlt = min(alts)
+ viableAlts.add(minAlt)
+ if len(viableAlts)>1 : # more than 1 viable alt
+ return ATN.INVALID_ALT_NUMBER
+ return min(viableAlts)
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f4dc31088d35b73304432c46c54e31c1ab92700
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py
@@ -0,0 +1,323 @@
+#
+# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+# Use of this file is governed by the BSD 3-clause license that
+# can be found in the LICENSE.txt file in the project root.
+#
+
+# A tree structure used to record the semantic context in which
+# an ATN configuration is valid. It's either a single predicate,
+# a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+#
+# I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of +# {@link SemanticContext} within the scope of this outer class.
+# +from antlr4.Recognizer import Recognizer +from antlr4.RuleContext import RuleContext +from io import StringIO + + +class SemanticContext(object): + # + # The default {@link SemanticContext}, which is semantically equivalent to + # a predicate of the form {@code {true}?}. + # + NONE = None + + # + # For context independent predicates, we evaluate them without a local + # context (i.e., null context). That way, we can evaluate them without + # having to create proper rule-specific context during prediction (as + # opposed to the parser, which creates them naturally). In a practical + # sense, this avoids a cast exception from RuleContext to myruleContext. + # + #For context dependent predicates, we must pass in a local context so that + # references such as $arg evaluate properly as _localctx.arg. We only + # capture context dependent predicates in the context in which we begin + # prediction, so we passed in the outer context here in case of context + # dependent predicate evaluation.
+ # + def eval(self, parser:Recognizer , outerContext:RuleContext ): + pass + + # + # Evaluate the precedence predicates for the context and reduce the result. + # + # @param parser The parser instance. + # @param outerContext The current parser context object. + # @return The simplified semantic context after precedence predicates are + # evaluated, which will be one of the following values. + #+ # The evaluation of predicates by this context is short-circuiting, but + # unordered.
+ # + def eval(self, parser:Recognizer, outerContext:RuleContext): + return all(opnd.eval(parser, outerContext) for opnd in self.opnds) + + def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext): + differs = False + operands = [] + for context in self.opnds: + evaluated = context.evalPrecedence(parser, outerContext) + differs |= evaluated is not context + if evaluated is None: + # The AND context is false if any element is false + return None + elif evaluated is not SemanticContext.NONE: + # Reduce the result by skipping true elements + operands.append(evaluated) + + if not differs: + return self + + if len(operands)==0: + # all elements were true, so the AND context is true + return SemanticContext.NONE + + result = None + for o in operands: + result = o if result is None else andContext(result, o) + + return result + + def __str__(self): + with StringIO() as buf: + first = True + for o in self.opnds: + if not first: + buf.write("&&") + buf.write(str(o)) + first = False + return buf.getvalue() + +# +# A semantic context which is true whenever at least one of the contained +# contexts is true. 
+del OR +class OR (SemanticContext): + __slots__ = 'opnds' + + def __init__(self, a:SemanticContext, b:SemanticContext): + operands = set() + if isinstance( a, OR ): + operands.update(a.opnds) + else: + operands.add(a) + if isinstance( b, OR ): + operands.update(b.opnds) + else: + operands.add(b) + + precedencePredicates = filterPrecedencePredicates(operands) + if len(precedencePredicates)>0: + # interested in the transition with the highest precedence + s = sorted(precedencePredicates) + reduced = s[-1] + operands.add(reduced) + + self.opnds = list(operands) + + def __eq__(self, other): + if self is other: + return True + elif not isinstance(other, OR): + return False + else: + return self.opnds == other.opnds + + def __hash__(self): + h = 0 + for o in self.opnds: + h = hash((h, o)) + return hash((h, "OR")) + + #+ # The evaluation of predicates by this context is short-circuiting, but + # unordered.
+ # + def eval(self, parser:Recognizer, outerContext:RuleContext): + return any(opnd.eval(parser, outerContext) for opnd in self.opnds) + + def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext): + differs = False + operands = [] + for context in self.opnds: + evaluated = context.evalPrecedence(parser, outerContext) + differs |= evaluated is not context + if evaluated is SemanticContext.NONE: + # The OR context is true if any element is true + return SemanticContext.NONE + elif evaluated is not None: + # Reduce the result by skipping false elements + operands.append(evaluated) + + if not differs: + return self + + if len(operands)==0: + # all elements were false, so the OR context is false + return None + + result = None + for o in operands: + result = o if result is None else orContext(result, o) + + return result + + def __str__(self): + with StringIO() as buf: + first = True + for o in self.opnds: + if not first: + buf.write("||") + buf.write(str(o)) + first = False + return buf.getvalue() + + +SemanticContext.NONE = Predicate() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py new file mode 100644 index 0000000000000000000000000000000000000000..2e4c9971763c34dbb2690660434c5c99d44193e1 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py @@ -0,0 +1,268 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + +# An ATN transition between any two ATN states. Subclasses define +# atom, set, epsilon, action, predicate, rule transitions. +# +#This is a one way link. It emanates from a state (usually via a list of +# transitions) and has a target state.
+# +#Since we never have to change the ATN transitions once we construct it, +# we can fix these transitions as specific classes. The DFA transitions +# on the other hand need to update the labels as it adds transitions to +# the states. We'll use the term Edge for the DFA to distinguish them from +# ATN transitions.
+# +from antlr4.IntervalSet import IntervalSet +from antlr4.Token import Token + +# need forward declarations +from antlr4.atn.SemanticContext import Predicate, PrecedencePredicate + +ATNState = None +RuleStartState = None + +class Transition (object): + __slots__ = ('target','isEpsilon','label') + + # constants for serialization + EPSILON = 1 + RANGE = 2 + RULE = 3 + PREDICATE = 4 # e.g., {isType(input.LT(1))}? + ATOM = 5 + ACTION = 6 + SET = 7 # ~(A|B) or ~atom, wildcard, which convert to next 2 + NOT_SET = 8 + WILDCARD = 9 + PRECEDENCE = 10 + + serializationNames = [ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE" + ] + + serializationTypes = dict() + + def __init__(self, target:ATNState): + # The target of this transition. + if target is None: + raise Exception("target cannot be null.") + self.target = target + # Are we epsilon, action, sempred? + self.isEpsilon = False + self.label = None + + +# TODO: make all transitions sets? no, should remove set edges +class AtomTransition(Transition): + __slots__ = ('label_', 'serializationType') + + def __init__(self, target:ATNState, label:int): + super().__init__(target) + self.label_ = label # The token type or character value; or, signifies special label. 
+ self.label = self.makeLabel() + self.serializationType = self.ATOM + + def makeLabel(self): + s = IntervalSet() + s.addOne(self.label_) + return s + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return self.label_ == symbol + + def __str__(self): + return str(self.label_) + +class RuleTransition(Transition): + __slots__ = ('ruleIndex', 'precedence', 'followState', 'serializationType') + + def __init__(self, ruleStart:RuleStartState, ruleIndex:int, precedence:int, followState:ATNState): + super().__init__(ruleStart) + self.ruleIndex = ruleIndex # ptr to the rule definition object for this rule ref + self.precedence = precedence + self.followState = followState # what node to begin computations following ref to rule + self.serializationType = self.RULE + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + +class EpsilonTransition(Transition): + __slots__ = ('serializationType', 'outermostPrecedenceReturn') + + def __init__(self, target, outermostPrecedenceReturn=-1): + super(EpsilonTransition, self).__init__(target) + self.serializationType = self.EPSILON + self.isEpsilon = True + self.outermostPrecedenceReturn = outermostPrecedenceReturn + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + def __str__(self): + return "epsilon" + +class RangeTransition(Transition): + __slots__ = ('serializationType', 'start', 'stop') + + def __init__(self, target:ATNState, start:int, stop:int): + super().__init__(target) + self.serializationType = self.RANGE + self.start = start + self.stop = stop + self.label = self.makeLabel() + + def makeLabel(self): + s = IntervalSet() + s.addRange(range(self.start, self.stop + 1)) + return s + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol >= self.start and symbol <= self.stop + + def __str__(self): + return "'" + chr(self.start) + "'..'" + chr(self.stop) + "'" + 
+class AbstractPredicateTransition(Transition): + + def __init__(self, target:ATNState): + super().__init__(target) + + +class PredicateTransition(AbstractPredicateTransition): + __slots__ = ('serializationType', 'ruleIndex', 'predIndex', 'isCtxDependent') + + def __init__(self, target:ATNState, ruleIndex:int, predIndex:int, isCtxDependent:bool): + super().__init__(target) + self.serializationType = self.PREDICATE + self.ruleIndex = ruleIndex + self.predIndex = predIndex + self.isCtxDependent = isCtxDependent # e.g., $i ref in pred + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + def getPredicate(self): + return Predicate(self.ruleIndex, self.predIndex, self.isCtxDependent) + + def __str__(self): + return "pred_" + str(self.ruleIndex) + ":" + str(self.predIndex) + +class ActionTransition(Transition): + __slots__ = ('serializationType', 'ruleIndex', 'actionIndex', 'isCtxDependent') + + def __init__(self, target:ATNState, ruleIndex:int, actionIndex:int=-1, isCtxDependent:bool=False): + super().__init__(target) + self.serializationType = self.ACTION + self.ruleIndex = ruleIndex + self.actionIndex = actionIndex + self.isCtxDependent = isCtxDependent # e.g., $i ref in pred + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + def __str__(self): + return "action_"+self.ruleIndex+":"+self.actionIndex + +# A transition containing a set of values. 
+class SetTransition(Transition): + __slots__ = 'serializationType' + + def __init__(self, target:ATNState, set:IntervalSet): + super().__init__(target) + self.serializationType = self.SET + if set is not None: + self.label = set + else: + self.label = IntervalSet() + self.label.addRange(range(Token.INVALID_TYPE, Token.INVALID_TYPE + 1)) + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol in self.label + + def __str__(self): + return str(self.label) + +class NotSetTransition(SetTransition): + + def __init__(self, target:ATNState, set:IntervalSet): + super().__init__(target, set) + self.serializationType = self.NOT_SET + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol >= minVocabSymbol \ + and symbol <= maxVocabSymbol \ + and not super(type(self), self).matches(symbol, minVocabSymbol, maxVocabSymbol) + + def __str__(self): + return '~' + super(type(self), self).__str__() + + +class WildcardTransition(Transition): + __slots__ = 'serializationType' + + def __init__(self, target:ATNState): + super().__init__(target) + self.serializationType = self.WILDCARD + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return symbol >= minVocabSymbol and symbol <= maxVocabSymbol + + def __str__(self): + return "." 
+ + +class PrecedencePredicateTransition(AbstractPredicateTransition): + __slots__ = ('serializationType', 'precedence') + + def __init__(self, target:ATNState, precedence:int): + super().__init__(target) + self.serializationType = self.PRECEDENCE + self.precedence = precedence + self.isEpsilon = True + + def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): + return False + + + def getPredicate(self): + return PrecedencePredicate(self.precedence) + + def __str__(self): + return self.precedence + " >= _p" + + +Transition.serializationTypes = { + EpsilonTransition: Transition.EPSILON, + RangeTransition: Transition.RANGE, + RuleTransition: Transition.RULE, + PredicateTransition: Transition.PREDICATE, + AtomTransition: Transition.ATOM, + ActionTransition: Transition.ACTION, + SetTransition: Transition.SET, + NotSetTransition: Transition.NOT_SET, + WildcardTransition: Transition.WILDCARD, + PrecedencePredicateTransition: Transition.PRECEDENCE + } + +del ATNState +del RuleStartState + +from antlr4.atn.ATNState import * diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..216c000dc5ffc8e53cc9c596e420c1e67604d1aa --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py @@ -0,0 +1 @@ +__author__ = 'ericvergnaud' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py new file mode 100644 index 0000000000000000000000000000000000000000..d80589a6834a48cc6bb718bf33f57156e32ec934 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py @@ -0,0 +1,133 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. 
All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +from antlr4.atn.ATNState import StarLoopEntryState + +from antlr4.atn.ATNConfigSet import ATNConfigSet +from antlr4.atn.ATNState import DecisionState +from antlr4.dfa.DFAState import DFAState +from antlr4.error.Errors import IllegalStateException + + +class DFA(object): + __slots__ = ('atnStartState', 'decision', '_states', 's0', 'precedenceDfa') + + def __init__(self, atnStartState:DecisionState, decision:int=0): + # From which ATN state did we create this DFA? + self.atnStartState = atnStartState + self.decision = decision + # A set of all DFA states. Use {@link Map} so we can get old state back + # ({@link Set} only allows you to see if it's there). + self._states = dict() + self.s0 = None + # {@code true} if this DFA is for a precedence decision; otherwise, + # {@code false}. This is the backing field for {@link #isPrecedenceDfa}, + # {@link #setPrecedenceDfa}. + self.precedenceDfa = False + + if isinstance(atnStartState, StarLoopEntryState): + if atnStartState.isPrecedenceDecision: + self.precedenceDfa = True + precedenceState = DFAState(configs=ATNConfigSet()) + precedenceState.edges = [] + precedenceState.isAcceptState = False + precedenceState.requiresFullContext = False + self.s0 = precedenceState + + + # Get the start state for a specific precedence value. + # + # @param precedence The current precedence. + # @return The start state corresponding to the specified precedence, or + # {@code null} if no start state exists for the specified precedence. + # + # @throws IllegalStateException if this is not a precedence DFA. 
+ # @see #isPrecedenceDfa() + + def getPrecedenceStartState(self, precedence:int): + if not self.precedenceDfa: + raise IllegalStateException("Only precedence DFAs may contain a precedence start state.") + + # s0.edges is never null for a precedence DFA + if precedence < 0 or precedence >= len(self.s0.edges): + return None + return self.s0.edges[precedence] + + # Set the start state for a specific precedence value. + # + # @param precedence The current precedence. + # @param startState The start state corresponding to the specified + # precedence. + # + # @throws IllegalStateException if this is not a precedence DFA. + # @see #isPrecedenceDfa() + # + def setPrecedenceStartState(self, precedence:int, startState:DFAState): + if not self.precedenceDfa: + raise IllegalStateException("Only precedence DFAs may contain a precedence start state.") + + if precedence < 0: + return + + # synchronization on s0 here is ok. when the DFA is turned into a + # precedence DFA, s0 will be initialized once and not updated again + # s0.edges is never null for a precedence DFA + if precedence >= len(self.s0.edges): + ext = [None] * (precedence + 1 - len(self.s0.edges)) + self.s0.edges.extend(ext) + self.s0.edges[precedence] = startState + # + # Sets whether this is a precedence DFA. If the specified value differs + # from the current DFA configuration, the following actions are taken; + # otherwise no changes are made to the current DFA. + # + #I use a set of ATNConfig objects not simple states. An ATNConfig +# is both a state (ala normal conversion) and a RuleContext describing +# the chain of rules (if any) followed to arrive at that state.
+# +#A DFA state may have multiple references to a particular state, +# but with different ATN contexts (with same or different alts) +# meaning that state was reached via a different set of rule invocations.
+#/ +class DFAState(object): + __slots__ = ( + 'stateNumber', 'configs', 'edges', 'isAcceptState', 'prediction', + 'lexerActionExecutor', 'requiresFullContext', 'predicates' + ) + + def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=ATNConfigSet()): + self.stateNumber = stateNumber + self.configs = configs + # {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1) + # {@link Token#EOF} maps to {@code edges[0]}. + self.edges = None + self.isAcceptState = False + # if accept state, what ttype do we match or alt do we predict? + # This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or + # {@link #requiresFullContext}. + self.prediction = 0 + self.lexerActionExecutor = None + # Indicates that this state was created during SLL prediction that + # discovered a conflict between the configurations in the state. Future + # {@link ParserATNSimulator#execATN} invocations immediately jumped doing + # full context prediction if this field is true. + self.requiresFullContext = False + # During SLL parsing, this is a list of predicates associated with the + # ATN configurations of the DFA state. When we have predicates, + # {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates + # on-the-fly. If this is not null, then {@link #prediction} is + # {@link ATN#INVALID_ALT_NUMBER}. + # + #We only use these for non-{@link #requiresFullContext} but conflicting states. That + # means we know from the context (it's $ or we don't dip into outer + # context) that it's an ambiguity not a conflict.
+ # + #This list is computed by {@link ParserATNSimulator#predicateDFAState}.
+ self.predicates = None + + + + # Get the set of all alts mentioned by all ATN configurations in this + # DFA state. + def getAltSet(self): + if self.configs is not None: + return set(cfg.alt for cfg in self.configs) or None + return None + + def __hash__(self): + return hash(self.configs) + + # Two {@link DFAState} instances are equal if their ATN configuration sets + # are the same. This method is used to see if a state already exists. + # + #Because the number of alternatives and number of ATN configurations are + # finite, there is a finite number of DFA states that can be processed. + # This is necessary to show that the algorithm terminates.
+ # + #Cannot test the DFA state numbers here because in + # {@link ParserATNSimulator#addDFAState} we need to know if any other state + # exists that has this exact set of ATN configurations. The + # {@link #stateNumber} is irrelevant.
+ def __eq__(self, other): + # compare set of ATN configurations in this set with other + if self is other: + return True + elif not isinstance(other, DFAState): + return False + else: + return self.configs==other.configs + + def __str__(self): + with StringIO() as buf: + buf.write(str(self.stateNumber)) + buf.write(":") + buf.write(str(self.configs)) + if self.isAcceptState: + buf.write("=>") + if self.predicates is not None: + buf.write(str(self.predicates)) + else: + buf.write(str(self.prediction)) + return buf.getvalue() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..216c000dc5ffc8e53cc9c596e420c1e67604d1aa --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py @@ -0,0 +1 @@ +__author__ = 'ericvergnaud' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py new file mode 100644 index 0000000000000000000000000000000000000000..32ac14b63579ce7c984c2e34f2b1c80bebe328ed --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py @@ -0,0 +1,107 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +# + + +# +# This implementation of {@link ANTLRErrorListener} can be used to identify +# certain potential correctness and performance problems in grammars. "Reports" +# are made by calling {@link Parser#notifyErrorListeners} with the appropriate +# message. 
+# +#+ # This implementation prints messages to {@link System#err} containing the + # values of {@code line}, {@code charPositionInLine}, and {@code msg} using + # the following format.
+ # + #+ # line line:charPositionInLine msg + #+ # + def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): + print("line " + str(line) + ":" + str(column) + " " + msg, file=sys.stderr) + +ConsoleErrorListener.INSTANCE = ConsoleErrorListener() + +class ProxyErrorListener(ErrorListener): + + def __init__(self, delegates): + super().__init__() + if delegates is None: + raise ReferenceError("delegates") + self.delegates = delegates + + def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): + for delegate in self.delegates: + delegate.syntaxError(recognizer, offendingSymbol, line, column, msg, e) + + def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs): + for delegate in self.delegates: + delegate.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + + def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs): + for delegate in self.delegates: + delegate.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + + def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs): + for delegate in self.delegates: + delegate.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py new file mode 100644 index 0000000000000000000000000000000000000000..0f7caadb240445e6d997ad582a51836f95cab5c5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py @@ -0,0 +1,709 @@ +# +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. 
+# +import sys +from antlr4.IntervalSet import IntervalSet + +from antlr4.Token import Token +from antlr4.atn.ATNState import ATNState +from antlr4.error.Errors import RecognitionException, NoViableAltException, InputMismatchException, \ + FailedPredicateException, ParseCancellationException + +# need forward declaration +Parser = None + +class ErrorStrategy(object): + + def reset(self, recognizer:Parser): + pass + + def recoverInline(self, recognizer:Parser): + pass + + def recover(self, recognizer:Parser, e:RecognitionException): + pass + + def sync(self, recognizer:Parser): + pass + + def inErrorRecoveryMode(self, recognizer:Parser): + pass + + def reportError(self, recognizer:Parser, e:RecognitionException): + pass + + +# This is the default implementation of {@link ANTLRErrorStrategy} used for +# error reporting and recovery in ANTLR parsers. +# +class DefaultErrorStrategy(ErrorStrategy): + + def __init__(self): + super().__init__() + # Indicates whether the error strategy is currently "recovering from an + # error". This is used to suppress reporting multiple error messages while + # attempting to recover from a detected syntax error. + # + # @see #inErrorRecoveryMode + # + self.errorRecoveryMode = False + + # The index into the input stream where the last error occurred. + # This is used to prevent infinite loops where an error is found + # but no token is consumed during recovery...another error is found, + # ad nauseum. This is a failsafe mechanism to guarantee that at least + # one token/tree node is consumed for two errors. + # + self.lastErrorIndex = -1 + self.lastErrorStates = None + self.nextTokensContext = None + self.nextTokenState = 0 + + #
The default implementation simply calls {@link #endErrorCondition} to + # ensure that the handler is not in error recovery mode.
+ def reset(self, recognizer:Parser): + self.endErrorCondition(recognizer) + + # + # This method is called to enter error recovery mode when a recognition + # exception is reported. + # + # @param recognizer the parser instance + # + def beginErrorCondition(self, recognizer:Parser): + self.errorRecoveryMode = True + + def inErrorRecoveryMode(self, recognizer:Parser): + return self.errorRecoveryMode + + # + # This method is called to leave error recovery mode after recovering from + # a recognition exception. + # + # @param recognizer + # + def endErrorCondition(self, recognizer:Parser): + self.errorRecoveryMode = False + self.lastErrorStates = None + self.lastErrorIndex = -1 + + # + # {@inheritDoc} + # + #The default implementation simply calls {@link #endErrorCondition}.
+ # + def reportMatch(self, recognizer:Parser): + self.endErrorCondition(recognizer) + + # + # {@inheritDoc} + # + #The default implementation returns immediately if the handler is already + # in error recovery mode. Otherwise, it calls {@link #beginErrorCondition} + # and dispatches the reporting task based on the runtime type of {@code e} + # according to the following table.
+ # + #The default implementation resynchronizes the parser by consuming tokens + # until we find one in the resynchronization set--loosely the set of tokens + # that can follow the current rule.
+ # + def recover(self, recognizer:Parser, e:RecognitionException): + if self.lastErrorIndex==recognizer.getInputStream().index \ + and self.lastErrorStates is not None \ + and recognizer.state in self.lastErrorStates: + # uh oh, another error at same token index and previously-visited + # state in ATN; must be a case where LT(1) is in the recovery + # token set so nothing got consumed. Consume a single token + # at least to prevent an infinite loop; this is a failsafe. + recognizer.consume() + + self.lastErrorIndex = recognizer._input.index + if self.lastErrorStates is None: + self.lastErrorStates = [] + self.lastErrorStates.append(recognizer.state) + followSet = self.getErrorRecoverySet(recognizer) + self.consumeUntil(recognizer, followSet) + + # The default implementation of {@link ANTLRErrorStrategy#sync} makes sure + # that the current lookahead symbol is consistent with what were expecting + # at this point in the ATN. You can call this anytime but ANTLR only + # generates code to check before subrules/loops and each iteration. + # + #Implements Jim Idle's magic sync mechanism in closures and optional + # subrules. E.g.,
+ # + #
+ # a : sync ( stuff sync )* ;
+ # sync : {consume to what can follow sync} ;
+ #
+ #
+ # At the start of a sub rule upon error, {@link #sync} performs single
+ # token deletion, if possible. If it can't do that, it bails on the current
+ # rule and uses the default error recovery, which consumes until the
+ # resynchronization set of the current rule.
+ #
+ # If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block + # with an empty alternative), then the expected set includes what follows + # the subrule.
+ # + #During loop iteration, it consumes until it sees a token that can start a + # sub rule or what follows loop. Yes, that is pretty aggressive. We opt to + # stay in the loop as long as possible.
+ # + #ORIGINS
+ # + #Previous versions of ANTLR did a poor job of their recovery within loops. + # A single mismatch token or missing token would force the parser to bail + # out of the entire rules surrounding the loop. So, for rule
+ # + #
+ # classDef : 'class' ID '{' member* '}'
+ #
+ #
+ # input with an extra token between members would force the parser to
+ # consume until it found the next class definition rather than the next
+ # member definition of the current class.
+ #
+ # This functionality cost a little bit of effort because the parser has to + # compare token set at the start of the loop and at each iteration. If for + # some reason speed is suffering for you, you can turn off this + # functionality by simply overriding this method as a blank { }.
+ # + def sync(self, recognizer:Parser): + # If already recovering, don't try to sync + if self.inErrorRecoveryMode(recognizer): + return + + s = recognizer._interp.atn.states[recognizer.state] + la = recognizer.getTokenStream().LA(1) + # try cheaper subset first; might get lucky. seems to shave a wee bit off + nextTokens = recognizer.atn.nextTokens(s) + if la in nextTokens: + self.nextTokensContext = None + self.nextTokenState = ATNState.INVALID_STATE_NUMBER + return + elif Token.EPSILON in nextTokens: + if self.nextTokensContext is None: + # It's possible the next token won't match information tracked + # by sync is restricted for performance. + self.nextTokensContext = recognizer._ctx + self.nextTokensState = recognizer._stateNumber + return + + if s.stateType in [ATNState.BLOCK_START, ATNState.STAR_BLOCK_START, + ATNState.PLUS_BLOCK_START, ATNState.STAR_LOOP_ENTRY]: + # report error and recover if possible + if self.singleTokenDeletion(recognizer)is not None: + return + else: + raise InputMismatchException(recognizer) + + elif s.stateType in [ATNState.PLUS_LOOP_BACK, ATNState.STAR_LOOP_BACK]: + self.reportUnwantedToken(recognizer) + expecting = recognizer.getExpectedTokens() + whatFollowsLoopIterationOrRule = expecting.addSet(self.getErrorRecoverySet(recognizer)) + self.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + + else: + # do nothing if we can't identify the exact kind of ATN state + pass + + # This is called by {@link #reportError} when the exception is a + # {@link NoViableAltException}. + # + # @see #reportError + # + # @param recognizer the parser instance + # @param e the recognition exception + # + def reportNoViableAlternative(self, recognizer:Parser, e:NoViableAltException): + tokens = recognizer.getTokenStream() + if tokens is not None: + if e.startToken.type==Token.EOF: + input = "This method is called when {@link #singleTokenDeletion} identifies + # single-token deletion as a viable recovery strategy for a mismatched + # input error.
+ # + #The default implementation simply returns if the handler is already in + # error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + # enter error recovery mode, followed by calling + # {@link Parser#notifyErrorListeners}.
+ # + # @param recognizer the parser instance + # + def reportUnwantedToken(self, recognizer:Parser): + if self.inErrorRecoveryMode(recognizer): + return + + self.beginErrorCondition(recognizer) + t = recognizer.getCurrentToken() + tokenName = self.getTokenErrorDisplay(t) + expecting = self.getExpectedTokens(recognizer) + msg = "extraneous input " + tokenName + " expecting " \ + + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) + recognizer.notifyErrorListeners(msg, t, None) + + # This method is called to report a syntax error which requires the + # insertion of a missing token into the input stream. At the time this + # method is called, the missing token has not yet been inserted. When this + # method returns, {@code recognizer} is in error recovery mode. + # + #This method is called when {@link #singleTokenInsertion} identifies + # single-token insertion as a viable recovery strategy for a mismatched + # input error.
+ # + #The default implementation simply returns if the handler is already in + # error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to + # enter error recovery mode, followed by calling + # {@link Parser#notifyErrorListeners}.
+ # + # @param recognizer the parser instance + # + def reportMissingToken(self, recognizer:Parser): + if self.inErrorRecoveryMode(recognizer): + return + self.beginErrorCondition(recognizer) + t = recognizer.getCurrentToken() + expecting = self.getExpectedTokens(recognizer) + msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) \ + + " at " + self.getTokenErrorDisplay(t) + recognizer.notifyErrorListeners(msg, t, None) + + #The default implementation attempts to recover from the mismatched input + # by using single token insertion and deletion as described below. If the + # recovery attempt fails, this method throws an + # {@link InputMismatchException}.
+ # + #EXTRA TOKEN (single token deletion)
+ # + #{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the + # right token, however, then assume {@code LA(1)} is some extra spurious + # token and delete it. Then consume and return the next token (which was + # the {@code LA(2)} token) as the successful result of the match operation.
+ # + #This recovery strategy is implemented by {@link #singleTokenDeletion}.
+ # + #MISSING TOKEN (single token insertion)
+ # + #If current token (at {@code LA(1)}) is consistent with what could come + # after the expected {@code LA(1)} token, then assume the token is missing + # and use the parser's {@link TokenFactory} to create it on the fly. The + # "insertion" is performed by returning the created token as the successful + # result of the match operation.
+ # + #This recovery strategy is implemented by {@link #singleTokenInsertion}.
+ # + #EXAMPLE
+ # + #For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When + # the parser returns from the nested call to {@code expr}, it will have + # call chain:
+ # + #+ # stat → expr → atom + #+ # + # and it will be trying to match the {@code ')'} at this point in the + # derivation: + # + #
+ # => ID '=' '(' INT ')' ('+' atom)* ';'
+ # ^
+ #
+ #
+ # The attempt to match {@code ')'} will fail when it sees {@code ';'} and
+ # call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
+ # is in the set of tokens that can follow the {@code ')'} token reference
+ # in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+ #
+ def recoverInline(self, recognizer:Parser):
+ # SINGLE TOKEN DELETION
+ matchedSymbol = self.singleTokenDeletion(recognizer)
+ if matchedSymbol is not None:
+ # we have deleted the extra token.
+ # now, move past ttype token as if all were ok
+ recognizer.consume()
+ return matchedSymbol
+
+ # SINGLE TOKEN INSERTION
+ if self.singleTokenInsertion(recognizer):
+ return self.getMissingSymbol(recognizer)
+
+ # even that didn't work; must throw the exception
+ raise InputMismatchException(recognizer)
+
+ #
+ # This method implements the single-token insertion inline error recovery
+ # strategy. It is called by {@link #recoverInline} if the single-token
+ # deletion strategy fails to recover from the mismatched input. If this
+ # method returns {@code true}, {@code recognizer} will be in error recovery
+ # mode.
+ #
+ # This method determines whether or not single-token insertion is viable by + # checking if the {@code LA(1)} input symbol could be successfully matched + # if it were instead the {@code LA(2)} symbol. If this method returns + # {@code true}, the caller is responsible for creating and inserting a + # token with the correct type to produce this behavior.
+ # + # @param recognizer the parser instance + # @return {@code true} if single-token insertion is a viable recovery + # strategy for the current mismatched input, otherwise {@code false} + # + def singleTokenInsertion(self, recognizer:Parser): + currentSymbolType = recognizer.getTokenStream().LA(1) + # if current token is consistent with what could come after current + # ATN state, then we know we're missing a token; error recovery + # is free to conjure up and insert the missing token + atn = recognizer._interp.atn + currentState = atn.states[recognizer.state] + next = currentState.transitions[0].target + expectingAtLL2 = atn.nextTokens(next, recognizer._ctx) + if currentSymbolType in expectingAtLL2: + self.reportMissingToken(recognizer) + return True + else: + return False + + # This method implements the single-token deletion inline error recovery + # strategy. It is called by {@link #recoverInline} to attempt to recover + # from mismatched input. If this method returns null, the parser and error + # handler state will not have changed. If this method returns non-null, + # {@code recognizer} will not be in error recovery mode since the + # returned token was a successful match. + # + #If the single-token deletion is successful, this method calls + # {@link #reportUnwantedToken} to report the error, followed by + # {@link Parser#consume} to actually "delete" the extraneous token. Then, + # before returning {@link #reportMatch} is called to signal a successful + # match.
+ # + # @param recognizer the parser instance + # @return the successfully matched {@link Token} instance if single-token + # deletion successfully recovers from the mismatched input, otherwise + # {@code null} + # + def singleTokenDeletion(self, recognizer:Parser): + nextTokenType = recognizer.getTokenStream().LA(2) + expecting = self.getExpectedTokens(recognizer) + if nextTokenType in expecting: + self.reportUnwantedToken(recognizer) + # print("recoverFromMismatchedToken deleting " \ + # + str(recognizer.getTokenStream().LT(1)) \ + # + " since " + str(recognizer.getTokenStream().LT(2)) \ + # + " is what we want", file=sys.stderr) + recognizer.consume() # simply delete extra token + # we want to return the token we're actually matching + matchedSymbol = recognizer.getCurrentToken() + self.reportMatch(recognizer) # we know current token is correct + return matchedSymbol + else: + return None + + # Conjure up a missing token during error recovery. + # + # The recognizer attempts to recover from single missing + # symbols. But, actions might refer to that missing symbol. + # For example, x=ID {f($x);}. The action clearly assumes + # that there has been an identifier matched previously and that + # $x points at that token. If that token is missing, but + # the next token in the stream is what we want we assume that + # this token is missing and we keep going. Because we + # have to return some token to replace the missing token, + # we have to conjure one up. This method gives the user control + # over the tokens returned for missing tokens. Mostly, + # you will want to create something special for identifier + # tokens. For literals such as '{' and ',', the default + # action in the parser or tree parser works. It simply creates + # a CommonToken of the appropriate type. The text will be the token. + # If you change what tokens must be created by the lexer, + # override this method to create the appropriate tokens. 
+ # + def getMissingSymbol(self, recognizer:Parser): + currentSymbol = recognizer.getCurrentToken() + expecting = self.getExpectedTokens(recognizer) + expectedTokenType = expecting[0] # get any element + if expectedTokenType==Token.EOF: + tokenText = "+# This error strategy is useful in the following scenarios.
+# +#+# {@code myparser.setErrorHandler(new BailErrorStrategy());}
#
# @see Parser#setErrorHandler(ANTLRErrorStrategy)
#
class BailErrorStrategy(DefaultErrorStrategy):
    # Instead of recovering from exception {@code e}, re-throw it wrapped
    # in a {@link ParseCancellationException} so it is not caught by the
    # rule function catches. Use {@link Exception#getCause()} to get the
    # original {@link RecognitionException}.
    #
    def recover(self, recognizer:Parser, e:RecognitionException):
        # Record the exception on every context up the rule invocation
        # stack so callers can inspect where parsing failed.
        ctx = recognizer._ctx
        while ctx is not None:
            ctx.exception = e
            ctx = ctx.parentCtx
        raise ParseCancellationException(e)

    # Make sure we don't attempt to recover inline; if the parser
    # successfully recovers, it won't throw an exception.
    #
    def recoverInline(self, recognizer:Parser):
        self.recover(recognizer, InputMismatchException(recognizer))

    # Make sure we don't attempt to recover from problems in subrules.
    def sync(self, recognizer:Parser):
        pass

del Parser

# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#

# need forward declaration
Token = None
Lexer = None
Parser = None
TokenStream = None
ATNConfigSet = None
ParserRulecontext = None
PredicateTransition = None
BufferedTokenStream = None


class UnsupportedOperationException(Exception):
    """Raised when an operation is not supported by the receiver."""

    def __init__(self, msg:str):
        super().__init__(msg)


class IllegalStateException(Exception):
    """Raised when an object is in an invalid state for the request."""

    def __init__(self, msg:str):
        super().__init__(msg)


class CancellationException(IllegalStateException):
    """Raised when an in-progress operation is cancelled."""

    def __init__(self, msg:str):
        super().__init__(msg)


# The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
# 3 kinds of errors: prediction errors, failed predicate errors, and
# mismatched input errors. In each case, the parser knows where it is
# in the input, where it is in the ATN, the rule invocation stack,
# and what kind of problem occurred.

from antlr4.InputStream import InputStream
from antlr4.ParserRuleContext import ParserRuleContext
from antlr4.Recognizer import Recognizer


class RecognitionException(Exception):

    def __init__(self, message:str=None, recognizer:Recognizer=None, input:InputStream=None, ctx:ParserRulecontext=None):
        super().__init__(message)
        self.message = message
        self.recognizer = recognizer
        self.input = input
        self.ctx = ctx
        # The current {@link Token} when an error occurred. Since not all
        # streams support accessing symbols by index, we have to track the
        # {@link Token} instance itself.
        self.offendingToken = None
        # The ATN state number the parser was in at the time the error
        # occurred. For {@link NoViableAltException} and
        # {@link LexerNoViableAltException} exceptions, this is the
        # {@link DecisionState} number. For others, it is the state whose
        # outgoing edge we couldn't match. -1 when the state is not known.
        self.offendingState = -1
        if recognizer is not None:
            self.offendingState = recognizer.state

    #
    # Gets the set of input symbols which could potentially follow the
    # previously matched symbol at the time this exception was thrown.
    #
    # If the set of expected tokens is not known and could not be computed,
    # this method returns {@code null}.
    #
    # @return The set of token types that could potentially follow the current
    # state in the ATN, or {@code null} if the information is not available.
    #/
    def getExpectedTokens(self):
        if self.recognizer is None:
            return None
        return self.recognizer.atn.getExpectedTokens(self.offendingState, self.ctx)


class LexerNoViableAltException(RecognitionException):

    def __init__(self, lexer:Lexer, input:InputStream, startIndex:int, deadEndConfigs:ATNConfigSet):
        super().__init__(message=None, recognizer=lexer, input=input, ctx=None)
        self.startIndex = startIndex
        self.deadEndConfigs = deadEndConfigs

    def __str__(self):
        symbol = ""
        if 0 <= self.startIndex < self.input.size:
            symbol = self.input.getText(self.startIndex, self.startIndex)
            # TODO symbol = Utils.escapeWhitespace(symbol, false);
        return "LexerNoViableAltException('" + symbol + "')"


# Indicates that the parser could not decide which of two or more paths
# to take based upon the remaining input. It tracks the starting token
# of the offending input and also knows where the parser was
# in the various paths when the error. Reported by reportNoViableAlternative()
#
class NoViableAltException(RecognitionException):

    def __init__(self, recognizer:Parser, input:TokenStream=None, startToken:Token=None,
                 offendingToken:Token=None, deadEndConfigs:ATNConfigSet=None, ctx:ParserRuleContext=None):
        # Fall back to the recognizer's current position for any argument
        # the caller did not supply.
        ctx = recognizer._ctx if ctx is None else ctx
        offendingToken = recognizer.getCurrentToken() if offendingToken is None else offendingToken
        startToken = recognizer.getCurrentToken() if startToken is None else startToken
        input = recognizer.getInputStream() if input is None else input
        super().__init__(recognizer=recognizer, input=input, ctx=ctx)
        # Which configurations did we try at input.index() that couldn't match input.LT(1)?
        self.deadEndConfigs = deadEndConfigs
        # The token object at the start index; the input stream might
        # not be buffering tokens so get a reference to it. (At the
        # time the error occurred, of course the stream needs to keep a
        # buffer all of the tokens but later we might not have access to those.)
        self.startToken = startToken
        self.offendingToken = offendingToken


# This signifies any kind of mismatched input exceptions such as
# when the current input does not match the expected token.
#
class InputMismatchException(RecognitionException):

    def __init__(self, recognizer:Parser):
        super().__init__(recognizer=recognizer, input=recognizer.getInputStream(), ctx=recognizer._ctx)
        self.offendingToken = recognizer.getCurrentToken()


# A semantic predicate failed during validation. Validation of predicates
# occurs when normally parsing the alternative just like matching a token.
# Disambiguating predicate evaluation occurs when we test a predicate during
# prediction.
class FailedPredicateException(RecognitionException):

    def __init__(self, recognizer:Parser, predicate:str=None, message:str=None):
        super().__init__(message=self.formatMessage(predicate, message), recognizer=recognizer,
                         input=recognizer.getInputStream(), ctx=recognizer._ctx)
        state = recognizer._interp.atn.states[recognizer.state]
        trans = state.transitions[0]
        # Local import to avoid a circular dependency at module load time.
        from antlr4.atn.Transition import PredicateTransition
        if isinstance(trans, PredicateTransition):
            self.ruleIndex = trans.ruleIndex
            self.predicateIndex = trans.predIndex
        else:
            self.ruleIndex = 0
            self.predicateIndex = 0
        self.predicate = predicate
        self.offendingToken = recognizer.getCurrentToken()

    def formatMessage(self, predicate:str, message:str):
        # An explicit message wins; otherwise synthesize one from the predicate.
        return message if message is not None else "failed predicate: {" + predicate + "}?"


class ParseCancellationException(CancellationException):

    pass


del Token
del Lexer
del Parser
del TokenStream
del ATNConfigSet
del ParserRulecontext
del PredicateTransition
del BufferedTokenStream

__author__ = 'ericvergnaud'

#
# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#

class Chunk(object):
    """Base class for the pieces a tree pattern is split into (tags or text)."""
    pass


class TagChunk(Chunk):
    """A pattern chunk representing a ``<tag>`` or ``<label:tag>`` placeholder.

    ``tag`` is the token or rule name; ``label`` is the optional alias,
    or ``None`` when the tag is unlabeled.
    """
    __slots__ = ('tag', 'label')

    def __init__(self, tag:str, label:str=None):
        self.tag = tag
        self.label = label

    def __str__(self):
        # Render exactly as written in the pattern: "tag" or "label:tag".
        if self.label is None:
            return self.tag
        else:
            return self.label + ":" + self.tag


class TextChunk(Chunk):
    """A chunk of literal pattern text between tags."""
    # Consistency fix: tuple form, matching TagChunk.__slots__ (a bare
    # string also works, but the mixed styles were inconsistent).
    __slots__ = ('text',)

    def __init__(self, text:str):
        self.text = text

    def __str__(self):
        # Quote the literal text, e.g. 'x = 0;'.
        return "'" + self.text + "'"
+ # + # @exception IllegalArgumentException if {@code tree} is {@code null} + # @exception IllegalArgumentException if {@code pattern} is {@code null} + # @exception IllegalArgumentException if {@code labels} is {@code null} + # + def __init__(self, tree:ParseTree, pattern:ParseTreePattern, labels:dict, mismatchedNode:ParseTree): + if tree is None: + raise Exception("tree cannot be null") + if pattern is None: + raise Exception("pattern cannot be null") + if labels is None: + raise Exception("labels cannot be null") + self.tree = tree + self.pattern = pattern + self.labels = labels + self.mismatchedNode = mismatchedNode + + # + # Get the last node associated with a specific {@code label}. + # + #For example, for pattern {@code
Pattern tags like {@code
If the {@code label} is the name of a parser rule or token in the + # grammar, the resulting list will contain both the parse trees matching + # rule or tags explicitly labeled with the label and the complete set of + # parse trees matching the labeled and unlabeled tags in the pattern for + # the parser rule or token. For example, if {@code label} is {@code "foo"}, + # the result will contain all of the following.
+ # + #Patterns are strings of source input text with special tags representing +# token or rule references such as:
+# +#{@code
Given a pattern start rule such as {@code statement}, this object constructs
+# a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
+# subtree. Then the {@link #match} routines can compare an actual
+# {@link ParseTree} from a parse with this pattern. Tag {@code
Pattern {@code x = 0;} is a similar pattern that matches the same pattern +# except that it requires the identifier to be {@code x} and the expression to +# be {@code 0}.
+# +#The {@link #matches} routines return {@code true} or {@code false} based +# upon a match for the tree rooted at the parameter sent in. The +# {@link #match} routines return a {@link ParseTreeMatch} object that +# contains the parse tree, the parse tree pattern, and a map from tag name to +# matched nodes (more below). A subtree that fails to match, returns with +# {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not +# match.
+# +#For efficiency, you can compile a tree pattern in string form to a +# {@link ParseTreePattern} object.
+# +#See {@code TestParseTreeMatcher} for lots of examples. +# {@link ParseTreePattern} has two static helper methods: +# {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that +# are easy to use but not super efficient because they create new +# {@link ParseTreePatternMatcher} objects each time and have to compile the +# pattern in string form before using it.
+# +#The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
+# constructor are used to parse the pattern in string form. The lexer converts
+# the {@code
Normally a parser does not accept token {@code
Delimiters are {@code <} and {@code >}, with {@code \} as the escape string +# by default, but you can set them to whatever you want using +# {@link #setDelimiters}. You must escape both start and stop strings +# {@code \<} and {@code \>}.
+# +from antlr4.CommonTokenStream import CommonTokenStream +from antlr4.InputStream import InputStream +from antlr4.ParserRuleContext import ParserRuleContext +from antlr4.Lexer import Lexer +from antlr4.ListTokenSource import ListTokenSource +from antlr4.Token import Token +from antlr4.error.ErrorStrategy import BailErrorStrategy +from antlr4.error.Errors import RecognitionException, ParseCancellationException +from antlr4.tree.Chunk import TagChunk, TextChunk +from antlr4.tree.RuleTagToken import RuleTagToken +from antlr4.tree.TokenTagToken import TokenTagToken +from antlr4.tree.Tree import ParseTree, TerminalNode, RuleNode + +# need forward declaration +Parser = None +ParseTreePattern = None + +class CannotInvokeStartRule(Exception): + + def __init__(self, e:Exception): + super().__init__(e) + +class StartRuleDoesNotConsumeFullPattern(Exception): + + pass + + +class ParseTreePatternMatcher(object): + __slots__ = ('lexer', 'parser', 'start', 'stop', 'escape') + + # Constructs a {@link ParseTreePatternMatcher} or from a {@link Lexer} and + # {@link Parser} object. The lexer input stream is altered for tokenizing + # the tree patterns. The parser is used as a convenient mechanism to get + # the grammar name, plus token, rule names. + def __init__(self, lexer:Lexer, parser:Parser): + self.lexer = lexer + self.parser = parser + self.start = "<" + self.stop = ">" + self.escape = "\\" # e.g., \< and \> must escape BOTH! + + # Set the delimiters used for marking rule and token tags within concrete + # syntax used by the tree pattern parser. + # + # @param start The start delimiter. + # @param stop The stop delimiter. + # @param escapeLeft The escape sequence to use for escaping a start or stop delimiter. + # + # @exception IllegalArgumentException if {@code start} is {@code null} or empty. + # @exception IllegalArgumentException if {@code stop} is {@code null} or empty. 
+ # + def setDelimiters(self, start:str, stop:str, escapeLeft:str): + if start is None or len(start)==0: + raise Exception("start cannot be null or empty") + if stop is None or len(stop)==0: + raise Exception("stop cannot be null or empty") + self.start = start + self.stop = stop + self.escape = escapeLeft + + # Does {@code pattern} matched as rule {@code patternRuleIndex} match {@code tree}?# + def matchesRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int): + p = self.compileTreePattern(pattern, patternRuleIndex) + return self.matches(tree, p) + + # Does {@code pattern} matched as rule patternRuleIndex match tree? Pass in a + # compiled pattern instead of a string representation of a tree pattern. + # + def matchesPattern(self, tree:ParseTree, pattern:ParseTreePattern): + mismatchedNode = self.matchImpl(tree, pattern.patternTree, dict()) + return mismatchedNode is None + + # + # Compare {@code pattern} matched as rule {@code patternRuleIndex} against + # {@code tree} and return a {@link ParseTreeMatch} object that contains the + # matched elements, or the node at which the match failed. + # + def matchRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int): + p = self.compileTreePattern(pattern, patternRuleIndex) + return self.matchPattern(tree, p) + + # + # Compare {@code pattern} matched against {@code tree} and return a + # {@link ParseTreeMatch} object that contains the matched elements, or the + # node at which the match failed. Pass in a compiled pattern instead of a + # string representation of a tree pattern. + # + def matchPattern(self, tree:ParseTree, pattern:ParseTreePattern): + labels = dict() + mismatchedNode = self.matchImpl(tree, pattern.patternTree, labels) + from antlr4.tree.ParseTreeMatch import ParseTreeMatch + return ParseTreeMatch(tree, pattern, labels, mismatchedNode) + + # + # For repeated use of a tree pattern, compile it to a + # {@link ParseTreePattern} using this method. 
+ # + def compileTreePattern(self, pattern:str, patternRuleIndex:int): + tokenList = self.tokenize(pattern) + tokenSrc = ListTokenSource(tokenList) + tokens = CommonTokenStream(tokenSrc) + from antlr4.ParserInterpreter import ParserInterpreter + parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames, + self.parser.ruleNames, self.parser.getATNWithBypassAlts(),tokens) + tree = None + try: + parserInterp.setErrorHandler(BailErrorStrategy()) + tree = parserInterp.parse(patternRuleIndex) + except ParseCancellationException as e: + raise e.cause + except RecognitionException as e: + raise e + except Exception as e: + raise CannotInvokeStartRule(e) + + # Make sure tree pattern compilation checks for a complete parse + if tokens.LA(1)!=Token.EOF: + raise StartRuleDoesNotConsumeFullPattern() + + from antlr4.tree.ParseTreePattern import ParseTreePattern + return ParseTreePattern(self, pattern, patternRuleIndex, tree) + + # + # Recursively walk {@code tree} against {@code patternTree}, filling + # {@code match.}{@link ParseTreeMatch#labels labels}. + # + # @return the first node encountered in {@code tree} which does not match + # a corresponding node in {@code patternTree}, or {@code null} if the match + # was successful. The specific node returned depends on the matching + # algorithm used by the implementation, and may be overridden. + # + def matchImpl(self, tree:ParseTree, patternTree:ParseTree, labels:dict): + if tree is None: + raise Exception("tree cannot be null") + if patternTree is None: + raise Exception("patternTree cannot be null") + + # x andThe implementation for {@link TokenTagToken} returns the token tag + # formatted with {@code <} and {@code >} delimiters.
+ # + def getText(self): + if self.label is None: + return "<" + self.tokenName + ">" + else: + return "<" + self.label + ":" + self.tokenName + ">" + + #The implementation for {@link TokenTagToken} returns a string of the form + # {@code tokenName:type}.
+ # + def __str__(self): + return self.tokenName + ":" + str(self.type) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py new file mode 100644 index 0000000000000000000000000000000000000000..812acc96bbee97860bc8a914feedcd0584def050 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py @@ -0,0 +1,191 @@ +# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +# Use of this file is governed by the BSD 3-clause license that +# can be found in the LICENSE.txt file in the project root. +#/ + + +# The basic notion of a tree has a parent, a payload, and a list of children. +# It is the most abstract interface for all the trees used by ANTLR. +#/ +from antlr4.Token import Token + +INVALID_INTERVAL = (-1, -2) + +class Tree(object): + pass + +class SyntaxTree(Tree): + pass + +class ParseTree(SyntaxTree): + pass + +class RuleNode(ParseTree): + pass + +class TerminalNode(ParseTree): + pass + +class ErrorNode(TerminalNode): + pass + +class ParseTreeVisitor(object): + def visit(self, tree): + return tree.accept(self) + + def visitChildren(self, node): + result = self.defaultResult() + n = node.getChildCount() + for i in range(n): + if not self.shouldVisitNextChild(node, result): + return result + + c = node.getChild(i) + childResult = c.accept(self) + result = self.aggregateResult(result, childResult) + + return result + + def visitTerminal(self, node): + return self.defaultResult() + + def visitErrorNode(self, node): + return self.defaultResult() + + def defaultResult(self): + return None + + def aggregateResult(self, aggregate, nextResult): + return nextResult + + def shouldVisitNextChild(self, node, currentResult): + return True + +ParserRuleContext = None + +class ParseTreeListener(object): + + def visitTerminal(self, node:TerminalNode): + pass + + def 
visitErrorNode(self, node:ErrorNode): + pass + + def enterEveryRule(self, ctx:ParserRuleContext): + pass + + def exitEveryRule(self, ctx:ParserRuleContext): + pass + +del ParserRuleContext + +class TerminalNodeImpl(TerminalNode): + __slots__ = ('parentCtx', 'symbol') + + def __init__(self, symbol:Token): + self.parentCtx = None + self.symbol = symbol + def __setattr__(self, key, value): + super().__setattr__(key, value) + + def getChild(self, i:int): + return None + + def getSymbol(self): + return self.symbol + + def getParent(self): + return self.parentCtx + + def getPayload(self): + return self.symbol + + def getSourceInterval(self): + if self.symbol is None: + return INVALID_INTERVAL + tokenIndex = self.symbol.tokenIndex + return (tokenIndex, tokenIndex) + + def getChildCount(self): + return 0 + + def accept(self, visitor:ParseTreeVisitor): + return visitor.visitTerminal(self) + + def getText(self): + return self.symbol.text + + def __str__(self): + if self.symbol.type == Token.EOF: + return "+# Split path into words and separators {@code /} and {@code //} via ANTLR +# itself then walk path elements from left to right. At each separator-word +# pair, find set of nodes. Next stage uses those as work list.
+# +#+# The basic interface is +# {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}. +# But that is just shorthand for:
+# +#
+# {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
+# return p.{@link #evaluate evaluate}(tree);
+#
+#
+# +# See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this +# allows operators:
+# +#+# and path elements:
+# +#+# Whitespace is not allowed.
+# +from antlr4 import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode +from antlr4.InputStream import InputStream +from antlr4.Parser import Parser +from antlr4.RuleContext import RuleContext +from antlr4.Token import Token +from antlr4.atn.ATNDeserializer import ATNDeserializer +from antlr4.error.ErrorListener import ErrorListener +from antlr4.error.Errors import LexerNoViableAltException +from antlr4.tree.Tree import ParseTree +from antlr4.tree.Trees import Trees +from io import StringIO + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\n") + buf.write("\64\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t") + buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5") + buf.write("\3\6\3\6\7\6\37\n\6\f\6\16\6\"\13\6\3\6\3\6\3\7\3\7\5") + buf.write("\7(\n\7\3\b\3\b\3\t\3\t\7\t.\n\t\f\t\16\t\61\13\t\3\t") + buf.write("\3\t\3/\2\n\3\5\5\6\7\7\t\b\13\t\r\2\17\2\21\n\3\2\4\7") + buf.write("\2\62;aa\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c") + buf.write("|\u00c2\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381") + buf.write("\u2001\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801") + buf.write("\uf902\ufdd1\ufdf2\uffff\64\2\3\3\2\2\2\2\5\3\2\2\2\2") + buf.write("\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\21\3\2\2\2\3\23") + buf.write("\3\2\2\2\5\26\3\2\2\2\7\30\3\2\2\2\t\32\3\2\2\2\13\34") + buf.write("\3\2\2\2\r\'\3\2\2\2\17)\3\2\2\2\21+\3\2\2\2\23\24\7\61") + buf.write("\2\2\24\25\7\61\2\2\25\4\3\2\2\2\26\27\7\61\2\2\27\6\3") + buf.write("\2\2\2\30\31\7,\2\2\31\b\3\2\2\2\32\33\7#\2\2\33\n\3\2") + buf.write("\2\2\34 \5\17\b\2\35\37\5\r\7\2\36\35\3\2\2\2\37\"\3\2") + buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!#\3\2\2\2\" \3\2\2\2#$\b\6") + buf.write("\2\2$\f\3\2\2\2%(\5\17\b\2&(\t\2\2\2\'%\3\2\2\2\'&\3\2") + buf.write("\2\2(\16\3\2\2\2)*\t\3\2\2*\20\3\2\2\2+/\7)\2\2,.\13\2") + buf.write("\2\2-,\3\2\2\2.\61\3\2\2\2/\60\3\2\2\2/-\3\2\2\2\60\62") 
+ buf.write("\3\2\2\2\61/\3\2\2\2\62\63\7)\2\2\63\22\3\2\2\2\6\2 \'") + buf.write("/\3\3\6\2") + return buf.getvalue() + + +class XPathLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + + TOKEN_REF = 1 + RULE_REF = 2 + ANYWHERE = 3 + ROOT = 4 + WILDCARD = 5 + BANG = 6 + ID = 7 + STRING = 8 + + modeNames = [ "DEFAULT_MODE" ] + + literalNames = [ "