prasb committed on
Commit
b5147e7
·
verified ·
1 Parent(s): 39570ac

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc +0 -0
  3. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc +0 -0
  4. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc +0 -0
  5. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc +0 -0
  6. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc +0 -0
  7. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc +0 -0
  8. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py +132 -0
  9. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py +159 -0
  10. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py +212 -0
  11. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py +24 -0
  12. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py +529 -0
  13. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py +47 -0
  14. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py +264 -0
  15. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNType.py +17 -0
  16. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerATNSimulator.py +570 -0
  17. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py +298 -0
  18. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py +143 -0
  19. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py +1649 -0
  20. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/PredictionMode.py +499 -0
  21. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py +323 -0
  22. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py +268 -0
  23. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py +1 -0
  24. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py +133 -0
  25. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFASerializer.py +73 -0
  26. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFAState.py +126 -0
  27. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py +1 -0
  28. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py +107 -0
  29. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorListener.py +72 -0
  30. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py +709 -0
  31. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/Errors.py +172 -0
  32. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/__init__.py +1 -0
  33. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Chunk.py +30 -0
  34. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreeMatch.py +118 -0
  35. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePattern.py +72 -0
  36. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePatternMatcher.py +374 -0
  37. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/RuleTagToken.py +50 -0
  38. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/TokenTagToken.py +47 -0
  39. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py +191 -0
  40. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Trees.py +111 -0
  41. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__init__.py +0 -0
  42. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Chunk.cpython-38.pyc +0 -0
  43. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-38.pyc +0 -0
  44. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Tree.cpython-38.pyc +0 -0
  45. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/XPath.py +352 -0
  46. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/__init__.py +1 -0
  47. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so +3 -0
  48. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc +3 -0
  49. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/__init__.cpython-38.pyc +0 -0
  50. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-38.pyc +0 -0
.gitattributes CHANGED
@@ -370,3 +370,7 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor
370
  my_container_sandbox/workspace/anaconda3/lib/libnppist.so.11.3.3.95 filter=lfs diff=lfs merge=lfs -text
371
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
372
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/pyparsing/__pycache__/core.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
370
  my_container_sandbox/workspace/anaconda3/lib/libnppist.so.11.3.3.95 filter=lfs diff=lfs merge=lfs -text
371
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/_vendor/more_itertools/__pycache__/more.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
372
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/pyparsing/__pycache__/core.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
373
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
374
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pkg_resources/__pycache__/__init__.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
375
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc filter=lfs diff=lfs merge=lfs -text
376
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc ADDED
Binary file (1.33 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/InputStream.cpython-38.pyc ADDED
Binary file (2.79 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/ListTokenSource.cpython-38.pyc ADDED
Binary file (2.6 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/Recognizer.cpython-38.pyc ADDED
Binary file (4.46 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/RuleContext.cpython-38.pyc ADDED
Binary file (3.65 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc ADDED
Binary file (10.3 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATN.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+ from antlr4.IntervalSet import IntervalSet
6
+
7
+ from antlr4.RuleContext import RuleContext
8
+
9
+ from antlr4.Token import Token
10
+ from antlr4.atn.ATNType import ATNType
11
+ from antlr4.atn.ATNState import ATNState, DecisionState
12
+
13
+
14
class ATN(object):
    """Augmented transition network: the runtime state/transition graph the
    lexer and parser simulators walk to recognize input."""

    __slots__ = (
        'grammarType', 'maxTokenType', 'states', 'decisionToState',
        'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
        'ruleToTokenType', 'lexerActions', 'modeToStartState'
    )

    INVALID_ALT_NUMBER = 0

    # Used for runtime deserialization of ATNs from strings
    def __init__(self, grammarType:ATNType, maxTokenType:int):
        """Create an empty ATN.

        grammarType -- the type of the ATN (lexer vs parser).
        maxTokenType -- the maximum value for any symbol recognized by a
            transition in the ATN.
        """
        self.grammarType = grammarType
        self.maxTokenType = maxTokenType
        self.states = []
        # Each subrule/rule is a decision point and we must track them so we
        # can go back later and build DFA predictors for them. This includes
        # all the rules, subrules, optional blocks, ()+, ()* etc...
        self.decisionToState = []
        # Maps from rule index to starting state number.
        self.ruleToStartState = []
        # Maps from rule index to stop state number.
        self.ruleToStopState = None
        self.modeNameToStartState = dict()
        # For lexer ATNs, this maps the rule index to the resulting token type.
        # For parser ATNs, this maps the rule index to the generated bypass token
        # type if the
        # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
        # deserialization option was specified; otherwise, this is {@code null}.
        self.ruleToTokenType = None
        # For lexer ATNs, this is an array of {@link LexerAction} objects which may
        # be referenced by action transitions in the ATN.
        self.lexerActions = None
        self.modeToStartState = []

    def nextTokensInContext(self, s:ATNState, ctx:RuleContext):
        """Compute the set of valid tokens that can occur starting in state s.

        If ctx is None, the set of tokens will not include what can follow
        the rule surrounding s. In other words, the set will be restricted to
        tokens reachable staying within s's rule.
        """
        # Imported locally to avoid a circular import at module load time.
        from antlr4.LL1Analyzer import LL1Analyzer
        anal = LL1Analyzer(self)
        return anal.LOOK(s, ctx=ctx)

    def nextTokensNoContext(self, s:ATNState):
        """Compute the set of valid tokens that can occur starting in s and
        staying in the same rule; Token.EPSILON is in the set if we reach the
        end of the rule.  The result is cached on the state."""
        if s.nextTokenWithinRule is not None:
            return s.nextTokenWithinRule
        s.nextTokenWithinRule = self.nextTokensInContext(s, None)
        # Freeze the cached set so later callers cannot mutate it.
        s.nextTokenWithinRule.readonly = True
        return s.nextTokenWithinRule

    def nextTokens(self, s:ATNState, ctx:RuleContext = None):
        """Dispatch to the context-free or in-context follow computation."""
        # Identity comparison is the idiomatic None test (was `ctx==None`).
        if ctx is None:
            return self.nextTokensNoContext(s)
        else:
            return self.nextTokensInContext(s, ctx)

    def addState(self, state:ATNState):
        """Append state (may be None) and assign it the next state number."""
        if state is not None:
            state.atn = self
            state.stateNumber = len(self.states)
        self.states.append(state)

    def removeState(self, state:ATNState):
        """Null out the slot; remaining state numbers are left untouched."""
        self.states[state.stateNumber] = None # just free mem, don't shift states in list

    def defineDecisionState(self, s:DecisionState):
        """Register s as a decision state and return its decision number."""
        self.decisionToState.append(s)
        s.decision = len(self.decisionToState) - 1
        return s.decision

    def getDecisionState(self, decision:int):
        """Return the decision state for the given decision number, or None
        if no decisions have been registered."""
        if not self.decisionToState:
            return None
        return self.decisionToState[decision]

    def getExpectedTokens(self, stateNumber:int, ctx:RuleContext):
        """Compute the set of input symbols which could follow ATN state
        number stateNumber in the specified full context.

        Considers the complete parser context, but does not evaluate semantic
        predicates (all predicates encountered are assumed true). If a path in
        the ATN exists from the starting state to the RuleStopState of the
        outermost context without matching any symbols, Token.EOF is added to
        the returned set.

        If ctx is None, it is treated as ParserRuleContext.EMPTY.

        stateNumber -- the ATN state number.
        ctx -- the full parse context.
        Returns the set of potentially valid input symbols which could follow
        the specified state in the specified context.
        Raises Exception if the ATN does not contain a state with number
        stateNumber.
        """
        if stateNumber < 0 or stateNumber >= len(self.states):
            raise Exception("Invalid state number.")
        s = self.states[stateNumber]
        following = self.nextTokens(s)
        if Token.EPSILON not in following:
            return following
        expected = IntervalSet()
        expected.addSet(following)
        expected.removeOne(Token.EPSILON)
        # Walk up the invocation chain while the current rule can be exited.
        # Identity comparison is the idiomatic None test (was `ctx != None`).
        while ctx is not None and ctx.invokingState >= 0 and Token.EPSILON in following:
            invokingState = self.states[ctx.invokingState]
            rt = invokingState.transitions[0]
            following = self.nextTokens(rt.followState)
            expected.addSet(following)
            expected.removeOne(Token.EPSILON)
            ctx = ctx.parentCtx
        if Token.EPSILON in following:
            expected.addOne(Token.EOF)
        return expected
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfig.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # A tuple: (ATN state, predicted alt, syntactic, semantic context).
8
+ # The syntactic context is a graph-structured stack node whose
9
+ # path(s) to the root is the rule invocation(s)
10
+ # chain used to arrive at the state. The semantic context is
11
+ # the tree of semantic predicates encountered before reaching
12
+ # an ATN state.
13
+ #/
14
+ from io import StringIO
15
+ from antlr4.PredictionContext import PredictionContext
16
+ from antlr4.atn.ATNState import ATNState, DecisionState
17
+ from antlr4.atn.LexerActionExecutor import LexerActionExecutor
18
+ from antlr4.atn.SemanticContext import SemanticContext
19
+
20
# need a forward declaration
ATNConfig = None

class ATNConfig(object):
    """A tuple: (ATN state, predicted alt, syntactic context, semantic context).

    The syntactic context is a graph-structured stack node whose path(s) to
    the root is the rule invocation(s) chain used to arrive at the state.
    The semantic context is the tree of semantic predicates encountered
    before reaching an ATN state.
    """

    __slots__ = (
        'state', 'alt', 'context', 'semanticContext', 'reachesIntoOuterContext',
        'precedenceFilterSuppressed'
    )

    def __init__(self, state:ATNState=None, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=None, config:ATNConfig=None):
        # Any field not supplied explicitly is inherited from the prototype
        # config, when one is given.
        if config is not None:
            state = config.state if state is None else state
            alt = config.alt if alt is None else alt
            context = config.context if context is None else context
            semantic = config.semanticContext if semantic is None else semantic
        if semantic is None:
            semantic = SemanticContext.NONE
        # The ATN state associated with this configuration.
        self.state = state
        # What alt (or lexer rule) is predicted by this configuration.
        self.alt = alt
        # The stack of invoking states leading to the rule/states associated
        # with this config. We track only those contexts pushed during
        # execution of the ATN simulator.
        self.context = context
        self.semanticContext = semantic
        # We cannot execute predicates dependent upon local context unless we
        # know for sure we are in the correct context, so closure() tracks how
        # far we dip into the outer context here (depth > 0). The depth is
        # never decremented, so it may not be totally accurate.
        self.reachesIntoOuterContext = 0 if config is None else config.reachesIntoOuterContext
        self.precedenceFilterSuppressed = False if config is None else config.precedenceFilterSuppressed

    def __eq__(self, other):
        """An ATN configuration is equal to another if both have the same
        state, predict the same alternative, and have equal syntactic and
        semantic contexts."""
        if self is other:
            return True
        if not isinstance(other, ATNConfig):
            return False
        return (self.state.stateNumber == other.state.stateNumber
                and self.alt == other.alt
                and ((self.context is other.context) or (self.context == other.context))
                and self.semanticContext == other.semanticContext
                and self.precedenceFilterSuppressed == other.precedenceFilterSuppressed)

    def __hash__(self):
        return hash((self.state.stateNumber, self.alt, self.context, self.semanticContext))

    def hashCodeForConfigSet(self):
        # Context is deliberately excluded: config sets merge on (s, i, pi).
        return hash((self.state.stateNumber, self.alt, hash(self.semanticContext)))

    def equalsForConfigSet(self, other):
        """Looser equality used inside ATNConfigSet: context is ignored."""
        if self is other:
            return True
        if not isinstance(other, ATNConfig):
            return False
        return (self.state.stateNumber == other.state.stateNumber
                and self.alt == other.alt
                and self.semanticContext == other.semanticContext)

    def __str__(self):
        pieces = ['(', str(self.state), ",", str(self.alt)]
        if self.context is not None:
            pieces.append(",[")
            pieces.append(str(self.context))
            pieces.append("]")
        if self.semanticContext is not None and self.semanticContext is not SemanticContext.NONE:
            pieces.append(",")
            pieces.append(str(self.semanticContext))
        if self.reachesIntoOuterContext > 0:
            pieces.append(",up=")
            pieces.append(str(self.reachesIntoOuterContext))
        pieces.append(')')
        return ''.join(pieces)
112
+
113
# need a forward declaration
LexerATNConfig = None

class LexerATNConfig(ATNConfig):
    """ATNConfig specialization for the lexer: additionally carries the lexer
    action executor and whether a non-greedy decision was passed through."""

    __slots__ = ('lexerActionExecutor', 'passedThroughNonGreedyDecision')

    def __init__(self, state:ATNState, alt:int=None, context:PredictionContext=None, semantic:SemanticContext=SemanticContext.NONE,
                 lexerActionExecutor:LexerActionExecutor=None, config:LexerATNConfig=None):
        super().__init__(state=state, alt=alt, context=context, semantic=semantic, config=config)
        # Inherit the executor from the prototype config when not given.
        if config is not None and lexerActionExecutor is None:
            lexerActionExecutor = config.lexerActionExecutor
        # This is the backing field for {@link #getLexerActionExecutor}.
        self.lexerActionExecutor = lexerActionExecutor
        self.passedThroughNonGreedyDecision = False if config is None else self.checkNonGreedyDecision(config, state)

    def __hash__(self):
        return hash((self.state.stateNumber, self.alt, self.context,
                     self.semanticContext, self.passedThroughNonGreedyDecision,
                     self.lexerActionExecutor))

    def __eq__(self, other):
        """Equal when the lexer-specific fields match and base equality holds."""
        if self is other:
            return True
        if not isinstance(other, LexerATNConfig):
            return False
        if self.passedThroughNonGreedyDecision != other.passedThroughNonGreedyDecision:
            return False
        if not (self.lexerActionExecutor == other.lexerActionExecutor):
            return False
        return super().__eq__(other)

    def hashCodeForConfigSet(self):
        # Unlike the base class, lexer configs use their full identity
        # (including context) inside a config set.
        return hash(self)

    def equalsForConfigSet(self, other):
        return self == other

    def checkNonGreedyDecision(self, source:LexerATNConfig, target:ATNState):
        """True if source already passed through a non-greedy decision, or
        target is itself a non-greedy decision state."""
        if source.passedThroughNonGreedyDecision:
            return True
        return isinstance(target, DecisionState) and target.nonGreedy
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNConfigSet.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+
6
+ #
7
+ # Specialized {@link Set}{@code <}{@link ATNConfig}{@code >} that can track
8
+ # info about the set, with support for combining similar configurations using a
9
+ # graph-structured stack.
10
+ #/
11
+ from io import StringIO
12
+ from functools import reduce
13
+ from antlr4.PredictionContext import PredictionContext, merge
14
+ from antlr4.Utils import str_list
15
+ from antlr4.atn.ATN import ATN
16
+ from antlr4.atn.ATNConfig import ATNConfig
17
+ from antlr4.atn.SemanticContext import SemanticContext
18
+ from antlr4.error.Errors import UnsupportedOperationException, IllegalStateException
19
+
20
ATNSimulator = None

class ATNConfigSet(object):
    """Specialized Set<ATNConfig> that can track info about the set, with
    support for combining similar configurations using a graph-structured
    stack."""

    __slots__ = (
        'configLookup', 'fullCtx', 'readonly', 'configs', 'uniqueAlt',
        'conflictingAlts', 'hasSemanticContext', 'dipsIntoOuterContext',
        'cachedHashCode'
    )

    #
    # The reason that we need configLookup is because we don't want the hash
    # map to use the standard hash code and equals. We need all configurations
    # with the same {@code (s,i,_,semctx)} to be equal. Unfortunately, this key
    # effectively doubles the number of objects associated with ATNConfigs.
    # The other solution is to use a hash table that lets us specify the
    # equals/hashcode operation.

    def __init__(self, fullCtx:bool=True):
        """Create an empty config set; fullCtx selects LL (True) vs SLL
        context-merge semantics."""
        # All configs but hashed by (s, i, _, pi) not including context. Wiped
        # out when we go readonly as this set becomes a DFA state.
        self.configLookup = dict()
        # Indicates that this configuration set is part of a full context
        # LL prediction. It will be used to determine how to merge $. With SLL
        # it's a wildcard whereas it is not for LL context merge.
        self.fullCtx = fullCtx
        # Indicates that the set of configurations is read-only. Do not
        # allow any code to manipulate the set; DFA states will point at
        # the sets and they must not change. This does not protect the other
        # fields; in particular, conflictingAlts is set after
        # we've made this readonly.
        self.readonly = False
        # Track the elements as they are added to the set; supports get(i).
        self.configs = []

        # TODO: these fields make me pretty uncomfortable but nice to pack up
        # info together, saves recomputation
        # TODO: can we track conflicts as they are added to save scanning configs later?
        self.uniqueAlt = 0
        self.conflictingAlts = None

        # Used in parser and lexer. In lexer, it indicates we hit a pred
        # while computing a closure operation. Don't make a DFA state from this.
        self.hasSemanticContext = False
        self.dipsIntoOuterContext = False

        self.cachedHashCode = -1

    def __iter__(self):
        return self.configs.__iter__()

    def add(self, config:ATNConfig, mergeCache=None):
        """Add config, merging contexts with an existing config that shares
        {@code (s,i,pi)} (state, alt, semantic context) as key.

        Updates {@link #dipsIntoOuterContext} and {@link #hasSemanticContext}
        when necessary.  Raises IllegalStateException if the set is readonly.
        """
        if self.readonly:
            # Was a bare Exception; IllegalStateException (a subclass of
            # Exception) matches optimizeConfigs()/clear() for consistency.
            raise IllegalStateException("This set is readonly")
        if config.semanticContext is not SemanticContext.NONE:
            self.hasSemanticContext = True
        if config.reachesIntoOuterContext > 0:
            self.dipsIntoOuterContext = True
        existing = self.getOrAdd(config)
        if existing is config:
            # Genuinely new entry: invalidate cached hash and track order.
            self.cachedHashCode = -1
            self.configs.append(config) # track order here
            return True
        # a previous (s,i,pi,_), merge with it and save result
        rootIsWildcard = not self.fullCtx
        merged = merge(existing.context, config.context, rootIsWildcard, mergeCache)
        # no need to check for existing.context, config.context in cache
        # since only way to create new graphs is "call rule" and here.
        # We cache at both places.
        existing.reachesIntoOuterContext = max(existing.reachesIntoOuterContext, config.reachesIntoOuterContext)
        # make sure to preserve the precedence filter suppression during the merge
        if config.precedenceFilterSuppressed:
            existing.precedenceFilterSuppressed = True
        existing.context = merged # replace context; no need to alt mapping
        return True

    def getOrAdd(self, config:ATNConfig):
        """Return the existing config that equalsForConfigSet-matches config,
        or insert config into the lookup buckets and return config itself."""
        h = config.hashCodeForConfigSet()
        bucket = self.configLookup.get(h, None)
        if bucket is not None:
            existing = next((cfg for cfg in bucket if config.equalsForConfigSet(cfg)), None)
            if existing is not None:
                return existing
            bucket.append(config)
        else:
            self.configLookup[h] = [config]
        return config

    def getStates(self):
        """Distinct ATN states referenced by the configs."""
        return set(c.state for c in self.configs)

    def getPredicates(self):
        """All non-trivial semantic contexts, in config order."""
        return list(cfg.semanticContext for cfg in self.configs if cfg.semanticContext != SemanticContext.NONE)

    def get(self, i:int):
        return self.configs[i]

    def optimizeConfigs(self, interpreter:ATNSimulator):
        """Replace each config's context with the interpreter's cached copy."""
        if self.readonly:
            raise IllegalStateException("This set is readonly")
        if not self.configs:
            return
        for config in self.configs:
            config.context = interpreter.getCachedContext(config.context)

    def addAll(self, coll:list):
        for c in coll:
            self.add(c)
        return False

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, ATNConfigSet):
            return False
        return self.configs is not None and \
            self.configs == other.configs and \
            self.fullCtx == other.fullCtx and \
            self.uniqueAlt == other.uniqueAlt and \
            self.conflictingAlts == other.conflictingAlts and \
            self.hasSemanticContext == other.hasSemanticContext and \
            self.dipsIntoOuterContext == other.dipsIntoOuterContext

    def __hash__(self):
        if self.readonly:
            # Contents can no longer change, so the hash may be memoized.
            if self.cachedHashCode == -1:
                self.cachedHashCode = self.hashConfigs()
            return self.cachedHashCode
        return self.hashConfigs()

    def hashConfigs(self):
        return reduce(lambda h, cfg: hash((h, cfg)), self.configs, 0)

    def __len__(self):
        return len(self.configs)

    def isEmpty(self):
        return len(self.configs) == 0

    def __contains__(self, config):
        if self.configLookup is None:
            raise UnsupportedOperationException("This method is not implemented for readonly sets.")
        h = config.hashCodeForConfigSet()
        bucket = self.configLookup.get(h, None)
        if bucket is not None:
            for c in bucket:
                if config.equalsForConfigSet(c):
                    return True
        return False

    def clear(self):
        if self.readonly:
            raise IllegalStateException("This set is readonly")
        self.configs.clear()
        self.cachedHashCode = -1
        self.configLookup.clear()

    def setReadonly(self, readonly:bool):
        self.readonly = readonly
        self.configLookup = None # can't mod, no need for lookup cache

    def __str__(self):
        with StringIO() as buf:
            buf.write(str_list(self.configs))
            if self.hasSemanticContext:
                buf.write(",hasSemanticContext=")
                buf.write(str(self.hasSemanticContext))
            if self.uniqueAlt != ATN.INVALID_ALT_NUMBER:
                buf.write(",uniqueAlt=")
                buf.write(str(self.uniqueAlt))
            if self.conflictingAlts is not None:
                buf.write(",conflictingAlts=")
                buf.write(str(self.conflictingAlts))
            if self.dipsIntoOuterContext:
                buf.write(",dipsIntoOuterContext")
            return buf.getvalue()
207
+
208
+
209
class OrderedATNConfigSet(ATNConfigSet):
    """ATNConfigSet variant used by the lexer simulator; keeps the base
    class's default (full-context) construction."""

    def __init__(self):
        super().__init__()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializationOptions.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+
5
# need a forward declaration so the annotation on __init__ can resolve
ATNDeserializationOptions = None

class ATNDeserializationOptions(object):
    """Options controlling ATN deserialization.

    An instance may be frozen by setting readonly to True; after that,
    assigning any other attribute raises an Exception.
    """

    __slots__ = ('readonly', 'verifyATN', 'generateRuleBypassTransitions')

    # Shared frozen default-options instance (assigned below the class body).
    defaultOptions = None

    def __init__(self, copyFrom:ATNDeserializationOptions = None):
        """Create options, optionally copying the flags from copyFrom.

        copyFrom -- an existing options object to copy verifyATN and
            generateRuleBypassTransitions from, or None for the defaults.
        """
        self.readonly = False
        self.verifyATN = True if copyFrom is None else copyFrom.verifyATN
        self.generateRuleBypassTransitions = False if copyFrom is None else copyFrom.generateRuleBypassTransitions

    def __setattr__(self, key, value):
        """Reject mutation of any attribute except 'readonly' once frozen.

        Raises Exception when the instance is readonly.
        """
        if key != "readonly" and self.readonly:
            raise Exception("The object is read only.")
        # Was super(type(self), self).__setattr__(...): that form recurses
        # infinitely if this class is ever subclassed, because type(self) is
        # then the subclass.  Zero-argument super() binds to this class.
        super().__setattr__(key, value)

ATNDeserializationOptions.defaultOptions = ATNDeserializationOptions()
ATNDeserializationOptions.defaultOptions.readonly = True
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNDeserializer.py ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+ from uuid import UUID
6
+ from io import StringIO
7
+ from typing import Callable
8
+ from antlr4.Token import Token
9
+ from antlr4.atn.ATN import ATN
10
+ from antlr4.atn.ATNType import ATNType
11
+ from antlr4.atn.ATNState import *
12
+ from antlr4.atn.Transition import *
13
+ from antlr4.atn.LexerAction import *
14
+ from antlr4.atn.ATNDeserializationOptions import ATNDeserializationOptions
15
+
16
# This is the earliest supported serialized UUID.
BASE_SERIALIZED_UUID = UUID("AADB8D7E-AEEF-4415-AD2B-8204D6CF042E")

# This UUID indicates the serialized ATN contains two sets of
# IntervalSets, where the second set's values are encoded as
# 32-bit integers to support the full Unicode SMP range up to U+10FFFF.
ADDED_UNICODE_SMP = UUID("59627784-3BE5-417A-B9EB-8131A7286089")

# This list contains all of the currently supported UUIDs, ordered by when
# the feature first appeared in this branch.
SUPPORTED_UUIDS = [ BASE_SERIALIZED_UUID, ADDED_UNICODE_SMP ]

# Serialized-format version this deserializer understands (checked first).
SERIALIZED_VERSION = 3

# This is the current serialized UUID.
SERIALIZED_UUID = ADDED_UNICODE_SMP
32
+
33
class ATNDeserializer (object):
    """Reconstructs an ATN object graph from its serialized string form.

    State held across the read methods: ``data`` (decoded int stream),
    ``pos`` (cursor into it), and ``uuid`` (format UUID of the input).
    """
    __slots__ = ('deserializationOptions', 'data', 'pos', 'uuid')

    def __init__(self, options : ATNDeserializationOptions = None):
        # Fall back to the shared, frozen default options when none are given.
        if options is None:
            options = ATNDeserializationOptions.defaultOptions
        self.deserializationOptions = options
40
+
41
+ # Determines if a particular serialized representation of an ATN supports
42
+ # a particular feature, identified by the {@link UUID} used for serializing
43
+ # the ATN at the time the feature was first introduced.
44
+ #
45
+ # @param feature The {@link UUID} marking the first time the feature was
46
+ # supported in the serialized ATN.
47
+ # @param actualUuid The {@link UUID} of the actual serialized ATN which is
48
+ # currently being deserialized.
49
+ # @return {@code true} if the {@code actualUuid} value represents a
50
+ # serialized ATN at or after the feature identified by {@code feature} was
51
+ # introduced; otherwise, {@code false}.
52
+
53
+ def isFeatureSupported(self, feature : UUID , actualUuid : UUID ):
54
+ idx1 = SUPPORTED_UUIDS.index(feature)
55
+ if idx1<0:
56
+ return False
57
+ idx2 = SUPPORTED_UUIDS.index(actualUuid)
58
+ return idx2 >= idx1
59
+
60
    def deserialize(self, data : str):
        """Parse *data* (a serialized ATN string) into a fully wired ATN.

        The calls below mirror the serializer's write order exactly and must
        not be reordered: every read method consumes from the single shared
        cursor set up by reset().
        """
        self.reset(data)
        self.checkVersion()
        self.checkUUID()
        atn = self.readATN()
        self.readStates(atn)
        self.readRules(atn)
        self.readModes(atn)
        sets = []
        # First, read all sets with 16-bit Unicode code points <= U+FFFF.
        self.readSets(atn, sets, self.readInt)
        # Next, if the ATN was serialized with the Unicode SMP feature,
        # deserialize sets with 32-bit arguments <= U+10FFFF.
        if self.isFeatureSupported(ADDED_UNICODE_SMP, self.uuid):
            self.readSets(atn, sets, self.readInt32)
        self.readEdges(atn, sets)
        self.readDecisions(atn)
        self.readLexerActions(atn)
        self.markPrecedenceDecisions(atn)
        self.verifyATN(atn)
        if self.deserializationOptions.generateRuleBypassTransitions \
                and atn.grammarType == ATNType.PARSER:
            self.generateRuleBypassTransitions(atn)
            # re-verify after modification
            self.verifyATN(atn)
        return atn
86
+
87
    def reset(self, data:str):
        """Decode *data* into self.data (a list of ints) and rewind self.pos.

        Each character undergoes the inverse of the serializer's per-char
        offset: values > 1 shift down by 2; values 0/1 wrap to the top of the
        16-bit range (+65533 == -2 mod 65535).
        """
        def adjust(c):
            v = ord(c)
            return v-2 if v>1 else v + 65533
        temp = [ adjust(c) for c in data ]
        # don't adjust the first value since that's the version number
        temp[0] = ord(data[0])
        self.data = temp
        self.pos = 0

    def checkVersion(self):
        """Raise unless the stream's leading version matches SERIALIZED_VERSION."""
        version = self.readInt()
        if version != SERIALIZED_VERSION:
            raise Exception("Could not deserialize ATN with version " + str(version) + " (expected " + str(SERIALIZED_VERSION) + ").")

    def checkUUID(self):
        """Read the format UUID, validate it, and remember it in self.uuid."""
        uuid = self.readUUID()
        if not uuid in SUPPORTED_UUIDS:
            raise Exception("Could not deserialize ATN with UUID: " + str(uuid) + \
                            " (expected " + str(SERIALIZED_UUID) + " or a legacy UUID).", uuid, SERIALIZED_UUID)
        self.uuid = uuid
108
+
109
+ def readATN(self):
110
+ idx = self.readInt()
111
+ grammarType = ATNType.fromOrdinal(idx)
112
+ maxTokenType = self.readInt()
113
+ return ATN(grammarType, maxTokenType)
114
+
115
    def readStates(self, atn:ATN):
        """Read every ATN state, then resolve loop-back / block-end links and
        the non-greedy / precedence flags that follow the state records."""
        loopBackStateNumbers = []
        endStateNumbers = []
        nstates = self.readInt()
        for i in range(0, nstates):
            stype = self.readInt()
            # ignore bad type of states
            if stype==ATNState.INVALID_TYPE:
                atn.addState(None)
                continue
            ruleIndex = self.readInt()
            # 0xFFFF is the serialized sentinel for "no rule"
            if ruleIndex == 0xFFFF:
                ruleIndex = -1

            s = self.stateFactory(stype, ruleIndex)
            if stype == ATNState.LOOP_END: # special case
                loopBackStateNumber = self.readInt()
                loopBackStateNumbers.append((s, loopBackStateNumber))
            elif isinstance(s, BlockStartState):
                endStateNumber = self.readInt()
                endStateNumbers.append((s, endStateNumber))

            atn.addState(s)

        # delay the assignment of loop back and end states until we know all the state instances have been initialized
        for pair in loopBackStateNumbers:
            pair[0].loopBackState = atn.states[pair[1]]

        for pair in endStateNumbers:
            pair[0].endState = atn.states[pair[1]]

        numNonGreedyStates = self.readInt()
        for i in range(0, numNonGreedyStates):
            stateNumber = self.readInt()
            atn.states[stateNumber].nonGreedy = True

        numPrecedenceStates = self.readInt()
        for i in range(0, numPrecedenceStates):
            stateNumber = self.readInt()
            atn.states[stateNumber].isPrecedenceRule = True
155
+
156
    def readRules(self, atn:ATN):
        """Read the rule table: start state (and, for lexers, token type) per
        rule, then derive the rule -> stop-state mapping from the states."""
        nrules = self.readInt()
        if atn.grammarType == ATNType.LEXER:
            atn.ruleToTokenType = [0] * nrules

        atn.ruleToStartState = [0] * nrules
        for i in range(0, nrules):
            s = self.readInt()
            startState = atn.states[s]
            atn.ruleToStartState[i] = startState
            if atn.grammarType == ATNType.LEXER:
                tokenType = self.readInt()
                # 0xFFFF encodes EOF
                if tokenType == 0xFFFF:
                    tokenType = Token.EOF

                atn.ruleToTokenType[i] = tokenType

        # stop states are not serialized per rule; recover them by scanning
        # all states for RuleStopState instances
        atn.ruleToStopState = [0] * nrules
        for state in atn.states:
            if not isinstance(state, RuleStopState):
                continue
            atn.ruleToStopState[state.ruleIndex] = state
            atn.ruleToStartState[state.ruleIndex].stopState = state
179
+
180
+ def readModes(self, atn:ATN):
181
+ nmodes = self.readInt()
182
+ for i in range(0, nmodes):
183
+ s = self.readInt()
184
+ atn.modeToStartState.append(atn.states[s])
185
+
186
    def readSets(self, atn:ATN, sets:list, readUnicode:Callable[[], int]):
        """Append IntervalSets to *sets*; code points are read with
        *readUnicode* (readInt for 16-bit sets, readInt32 for SMP sets)."""
        m = self.readInt()
        for i in range(0, m):
            iset = IntervalSet()
            sets.append(iset)
            n = self.readInt()
            containsEof = self.readInt()
            # EOF (-1) is flagged separately since it can't appear in a range
            if containsEof!=0:
                iset.addOne(-1)
            for j in range(0, n):
                i1 = readUnicode()
                i2 = readUnicode()
                iset.addRange(range(i1, i2 + 1)) # range upper limit is exclusive
199
+
200
    def readEdges(self, atn:ATN, sets:list):
        """Read serialized edges, then synthesize the derivable edges:
        rule-stop follow edges and block/loop back-links."""
        nedges = self.readInt()
        for i in range(0, nedges):
            src = self.readInt()
            trg = self.readInt()
            ttype = self.readInt()
            arg1 = self.readInt()
            arg2 = self.readInt()
            arg3 = self.readInt()
            trans = self.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
            srcState = atn.states[src]
            srcState.addTransition(trans)

        # edges for rule stop states can be derived, so they aren't serialized
        for state in atn.states:
            for i in range(0, len(state.transitions)):
                t = state.transitions[i]
                if not isinstance(t, RuleTransition):
                    continue
                outermostPrecedenceReturn = -1
                if atn.ruleToStartState[t.target.ruleIndex].isPrecedenceRule:
                    if t.precedence == 0:
                        outermostPrecedenceReturn = t.target.ruleIndex
                trans = EpsilonTransition(t.followState, outermostPrecedenceReturn)
                atn.ruleToStopState[t.target.ruleIndex].addTransition(trans)

        for state in atn.states:
            if isinstance(state, BlockStartState):
                # we need to know the end state to set its start state
                if state.endState is None:
                    raise Exception("IllegalState")
                # block end states can only be associated to a single block start state
                if state.endState.startState is not None:
                    raise Exception("IllegalState")
                state.endState.startState = state

            # point loop entry/start states back at their loop-back state
            if isinstance(state, PlusLoopbackState):
                for i in range(0, len(state.transitions)):
                    target = state.transitions[i].target
                    if isinstance(target, PlusBlockStartState):
                        target.loopBackState = state
            elif isinstance(state, StarLoopbackState):
                for i in range(0, len(state.transitions)):
                    target = state.transitions[i].target
                    if isinstance(target, StarLoopEntryState):
                        target.loopBackState = state
246
+
247
+ def readDecisions(self, atn:ATN):
248
+ ndecisions = self.readInt()
249
+ for i in range(0, ndecisions):
250
+ s = self.readInt()
251
+ decState = atn.states[s]
252
+ atn.decisionToState.append(decState)
253
+ decState.decision = i
254
+
255
    def readLexerActions(self, atn:ATN):
        """For lexer ATNs only: read the lexer-action table into
        atn.lexerActions (type + two data words per action)."""
        if atn.grammarType == ATNType.LEXER:
            count = self.readInt()
            atn.lexerActions = [ None ] * count
            for i in range(0, count):
                actionType = self.readInt()
                data1 = self.readInt()
                # 0xFFFF is the serialized encoding of -1
                if data1 == 0xFFFF:
                    data1 = -1
                data2 = self.readInt()
                if data2 == 0xFFFF:
                    data2 = -1
                lexerAction = self.lexerActionFactory(actionType, data1, data2)
                atn.lexerActions[i] = lexerAction
269
+
270
+ def generateRuleBypassTransitions(self, atn:ATN):
271
+
272
+ count = len(atn.ruleToStartState)
273
+ atn.ruleToTokenType = [ 0 ] * count
274
+ for i in range(0, count):
275
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
276
+
277
+ for i in range(0, count):
278
+ self.generateRuleBypassTransition(atn, i)
279
+
280
+ def generateRuleBypassTransition(self, atn:ATN, idx:int):
281
+
282
+ bypassStart = BasicBlockStartState()
283
+ bypassStart.ruleIndex = idx
284
+ atn.addState(bypassStart)
285
+
286
+ bypassStop = BlockEndState()
287
+ bypassStop.ruleIndex = idx
288
+ atn.addState(bypassStop)
289
+
290
+ bypassStart.endState = bypassStop
291
+ atn.defineDecisionState(bypassStart)
292
+
293
+ bypassStop.startState = bypassStart
294
+
295
+ excludeTransition = None
296
+
297
+ if atn.ruleToStartState[idx].isPrecedenceRule:
298
+ # wrap from the beginning of the rule to the StarLoopEntryState
299
+ endState = None
300
+ for state in atn.states:
301
+ if self.stateIsEndStateFor(state, idx):
302
+ endState = state
303
+ excludeTransition = state.loopBackState.transitions[0]
304
+ break
305
+
306
+ if excludeTransition is None:
307
+ raise Exception("Couldn't identify final state of the precedence rule prefix section.")
308
+
309
+ else:
310
+
311
+ endState = atn.ruleToStopState[idx]
312
+
313
+ # all non-excluded transitions that currently target end state need to target blockEnd instead
314
+ for state in atn.states:
315
+ for transition in state.transitions:
316
+ if transition == excludeTransition:
317
+ continue
318
+ if transition.target == endState:
319
+ transition.target = bypassStop
320
+
321
+ # all transitions leaving the rule start state need to leave blockStart instead
322
+ ruleToStartState = atn.ruleToStartState[idx]
323
+ count = len(ruleToStartState.transitions)
324
+ while count > 0:
325
+ bypassStart.addTransition(ruleToStartState.transitions[count-1])
326
+ del ruleToStartState.transitions[-1]
327
+
328
+ # link the new states
329
+ atn.ruleToStartState[idx].addTransition(EpsilonTransition(bypassStart))
330
+ bypassStop.addTransition(EpsilonTransition(endState))
331
+
332
+ matchState = BasicState()
333
+ atn.addState(matchState)
334
+ matchState.addTransition(AtomTransition(bypassStop, atn.ruleToTokenType[idx]))
335
+ bypassStart.addTransition(EpsilonTransition(matchState))
336
+
337
+
338
+ def stateIsEndStateFor(self, state:ATNState, idx:int):
339
+ if state.ruleIndex != idx:
340
+ return None
341
+ if not isinstance(state, StarLoopEntryState):
342
+ return None
343
+
344
+ maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
345
+ if not isinstance(maybeLoopEndState, LoopEndState):
346
+ return None
347
+
348
+ if maybeLoopEndState.epsilonOnlyTransitions and \
349
+ isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
350
+ return state
351
+ else:
352
+ return None
353
+
354
+
355
+ #
356
+ # Analyze the {@link StarLoopEntryState} states in the specified ATN to set
357
+ # the {@link StarLoopEntryState#isPrecedenceDecision} field to the
358
+ # correct value.
359
+ #
360
+ # @param atn The ATN.
361
+ #
362
+ def markPrecedenceDecisions(self, atn:ATN):
363
+ for state in atn.states:
364
+ if not isinstance(state, StarLoopEntryState):
365
+ continue
366
+
367
+ # We analyze the ATN to determine if this ATN decision state is the
368
+ # decision for the closure block that determines whether a
369
+ # precedence rule should continue or complete.
370
+ #
371
+ if atn.ruleToStartState[state.ruleIndex].isPrecedenceRule:
372
+ maybeLoopEndState = state.transitions[len(state.transitions) - 1].target
373
+ if isinstance(maybeLoopEndState, LoopEndState):
374
+ if maybeLoopEndState.epsilonOnlyTransitions and \
375
+ isinstance(maybeLoopEndState.transitions[0].target, RuleStopState):
376
+ state.isPrecedenceDecision = True
377
+
378
    def verifyATN(self, atn:ATN):
        """Check structural invariants of the deserialized ATN; raises via
        checkCondition on the first violation. No-op unless verifyATN is set
        in the deserialization options."""
        if not self.deserializationOptions.verifyATN:
            return
        # verify assumptions
        for state in atn.states:
            if state is None:
                continue

            # a state with several transitions must be epsilon-only
            self.checkCondition(state.epsilonOnlyTransitions or len(state.transitions) <= 1)

            if isinstance(state, PlusBlockStartState):
                self.checkCondition(state.loopBackState is not None)

            if isinstance(state, StarLoopEntryState):
                self.checkCondition(state.loopBackState is not None)
                self.checkCondition(len(state.transitions) == 2)

                # greedy loops list the block first; non-greedy list exit first
                if isinstance(state.transitions[0].target, StarBlockStartState):
                    self.checkCondition(isinstance(state.transitions[1].target, LoopEndState))
                    self.checkCondition(not state.nonGreedy)
                elif isinstance(state.transitions[0].target, LoopEndState):
                    self.checkCondition(isinstance(state.transitions[1].target, StarBlockStartState))
                    self.checkCondition(state.nonGreedy)
                else:
                    raise Exception("IllegalState")

            if isinstance(state, StarLoopbackState):
                self.checkCondition(len(state.transitions) == 1)
                self.checkCondition(isinstance(state.transitions[0].target, StarLoopEntryState))

            if isinstance(state, LoopEndState):
                self.checkCondition(state.loopBackState is not None)

            if isinstance(state, RuleStartState):
                self.checkCondition(state.stopState is not None)

            if isinstance(state, BlockStartState):
                self.checkCondition(state.endState is not None)

            if isinstance(state, BlockEndState):
                self.checkCondition(state.startState is not None)

            if isinstance(state, DecisionState):
                self.checkCondition(len(state.transitions) <= 1 or state.decision >= 0)
            else:
                self.checkCondition(len(state.transitions) <= 1 or isinstance(state, RuleStopState))
424
+
425
+ def checkCondition(self, condition:bool, message=None):
426
+ if not condition:
427
+ if message is None:
428
+ message = "IllegalState"
429
+ raise Exception(message)
430
+
431
+ def readInt(self):
432
+ i = self.data[self.pos]
433
+ self.pos += 1
434
+ return i
435
+
436
+ def readInt32(self):
437
+ low = self.readInt()
438
+ high = self.readInt()
439
+ return low | (high << 16)
440
+
441
+ def readLong(self):
442
+ low = self.readInt32()
443
+ high = self.readInt32()
444
+ return (low & 0x00000000FFFFFFFF) | (high << 32)
445
+
446
+ def readUUID(self):
447
+ low = self.readLong()
448
+ high = self.readLong()
449
+ allBits = (low & 0xFFFFFFFFFFFFFFFF) | (high << 64)
450
+ return UUID(int=allBits)
451
+
452
+ edgeFactories = [ lambda args : None,
453
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : EpsilonTransition(target),
454
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
455
+ RangeTransition(target, Token.EOF, arg2) if arg3 != 0 else RangeTransition(target, arg1, arg2),
456
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
457
+ RuleTransition(atn.states[arg1], arg2, arg3, target),
458
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
459
+ PredicateTransition(target, arg1, arg2, arg3 != 0),
460
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
461
+ AtomTransition(target, Token.EOF) if arg3 != 0 else AtomTransition(target, arg1),
462
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
463
+ ActionTransition(target, arg1, arg2, arg3 != 0),
464
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
465
+ SetTransition(target, sets[arg1]),
466
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
467
+ NotSetTransition(target, sets[arg1]),
468
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
469
+ WildcardTransition(target),
470
+ lambda atn, src, trg, arg1, arg2, arg3, sets, target : \
471
+ PrecedencePredicateTransition(target, arg1)
472
+ ]
473
+
474
+ def edgeFactory(self, atn:ATN, type:int, src:int, trg:int, arg1:int, arg2:int, arg3:int, sets:list):
475
+ target = atn.states[trg]
476
+ if type > len(self.edgeFactories) or self.edgeFactories[type] is None:
477
+ raise Exception("The specified transition type: " + str(type) + " is not valid.")
478
+ else:
479
+ return self.edgeFactories[type](atn, src, trg, arg1, arg2, arg3, sets, target)
480
+
481
+ stateFactories = [ lambda : None,
482
+ lambda : BasicState(),
483
+ lambda : RuleStartState(),
484
+ lambda : BasicBlockStartState(),
485
+ lambda : PlusBlockStartState(),
486
+ lambda : StarBlockStartState(),
487
+ lambda : TokensStartState(),
488
+ lambda : RuleStopState(),
489
+ lambda : BlockEndState(),
490
+ lambda : StarLoopbackState(),
491
+ lambda : StarLoopEntryState(),
492
+ lambda : PlusLoopbackState(),
493
+ lambda : LoopEndState()
494
+ ]
495
+
496
+ def stateFactory(self, type:int, ruleIndex:int):
497
+ if type> len(self.stateFactories) or self.stateFactories[type] is None:
498
+ raise Exception("The specified state type " + str(type) + " is not valid.")
499
+ else:
500
+ s = self.stateFactories[type]()
501
+ if s is not None:
502
+ s.ruleIndex = ruleIndex
503
+ return s
504
+
505
+ CHANNEL = 0 #The type of a {@link LexerChannelAction} action.
506
+ CUSTOM = 1 #The type of a {@link LexerCustomAction} action.
507
+ MODE = 2 #The type of a {@link LexerModeAction} action.
508
+ MORE = 3 #The type of a {@link LexerMoreAction} action.
509
+ POP_MODE = 4 #The type of a {@link LexerPopModeAction} action.
510
+ PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action.
511
+ SKIP = 6 #The type of a {@link LexerSkipAction} action.
512
+ TYPE = 7 #The type of a {@link LexerTypeAction} action.
513
+
514
+ actionFactories = [ lambda data1, data2: LexerChannelAction(data1),
515
+ lambda data1, data2: LexerCustomAction(data1, data2),
516
+ lambda data1, data2: LexerModeAction(data1),
517
+ lambda data1, data2: LexerMoreAction.INSTANCE,
518
+ lambda data1, data2: LexerPopModeAction.INSTANCE,
519
+ lambda data1, data2: LexerPushModeAction(data1),
520
+ lambda data1, data2: LexerSkipAction.INSTANCE,
521
+ lambda data1, data2: LexerTypeAction(data1)
522
+ ]
523
+
524
+ def lexerActionFactory(self, type:int, data1:int, data2:int):
525
+
526
+ if type > len(self.actionFactories) or self.actionFactories[type] is None:
527
+ raise Exception("The specified lexer action type " + str(type) + " is not valid.")
528
+ else:
529
+ return self.actionFactories[type](data1, data2)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNSimulator.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+ from antlr4.PredictionContext import PredictionContextCache, PredictionContext, getCachedPredictionContext
7
+ from antlr4.atn.ATN import ATN
8
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
9
+ from antlr4.dfa.DFAState import DFAState
10
+
11
+
12
class ATNSimulator(object):
    """Base class for the lexer and parser ATN interpreters: holds the ATN
    being simulated and the shared PredictionContext cache."""
    __slots__ = ('atn', 'sharedContextCache', '__dict__')

    # Must distinguish between missing edge and edge we know leads nowhere#/
    ERROR = DFAState(configs=ATNConfigSet())
    ERROR.stateNumber = 0x7FFFFFFF

    # The context cache maps all PredictionContext objects that are ==
    # to a single cached copy. This cache is shared across all contexts
    # in all ATNConfigs in all DFA states.  We rebuild each ATNConfigSet
    # to use only cached nodes/graphs in addDFAState(). We don't want to
    # fill this during closure() since there are lots of contexts that
    # pop up but are not used ever again. It also greatly slows down closure().
    #
    # <p>This cache makes a huge difference in memory and a little bit in speed.
    # For the Java grammar on java.*, it dropped the memory requirements
    # at the end from 25M to 16M. We don't store any of the full context
    # graphs in the DFA because they are limited to local context only,
    # but apparently there's a lot of repetition there as well. We optimize
    # the config contexts before storing the config set in the DFA states
    # by literally rebuilding them with cached subgraphs only.</p>
    #
    # <p>I tried a cache for use during closure operations, that was
    # whacked after each adaptivePredict(). It cost a little bit
    # more time I think and doesn't save on the overall footprint
    # so it's not worth the complexity.</p>
    #/
    def __init__(self, atn:ATN, sharedContextCache:PredictionContextCache):
        self.atn = atn
        self.sharedContextCache = sharedContextCache

    def getCachedContext(self, context:PredictionContext):
        """Return the cache-canonical equivalent of *context*, or *context*
        itself when no shared cache is configured."""
        if self.sharedContextCache is None:
            return context
        visited = dict()
        return getCachedPredictionContext(context, self.sharedContextCache, visited)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNState.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ # The following images show the relation of states and
8
+ # {@link ATNState#transitions} for various grammar constructs.
9
+ #
10
+ # <ul>
11
+ #
12
+ # <li>Solid edges marked with an &#0949; indicate a required
13
+ # {@link EpsilonTransition}.</li>
14
+ #
15
+ # <li>Dashed edges indicate locations where any transition derived from
16
+ # {@link Transition} might appear.</li>
17
+ #
18
+ # <li>Dashed nodes are place holders for either a sequence of linked
19
+ # {@link BasicState} states or the inclusion of a block representing a nested
20
+ # construct in one of the forms below.</li>
21
+ #
22
+ # <li>Nodes showing multiple outgoing alternatives with a {@code ...} support
23
+ # any number of alternatives (one or more). Nodes without the {@code ...} only
24
+ # support the exact number of alternatives shown in the diagram.</li>
25
+ #
26
+ # </ul>
27
+ #
28
+ # <h2>Basic Blocks</h2>
29
+ #
30
+ # <h3>Rule</h3>
31
+ #
32
+ # <embed src="images/Rule.svg" type="image/svg+xml"/>
33
+ #
34
+ # <h3>Block of 1 or more alternatives</h3>
35
+ #
36
+ # <embed src="images/Block.svg" type="image/svg+xml"/>
37
+ #
38
+ # <h2>Greedy Loops</h2>
39
+ #
40
+ # <h3>Greedy Closure: {@code (...)*}</h3>
41
+ #
42
+ # <embed src="images/ClosureGreedy.svg" type="image/svg+xml"/>
43
+ #
44
+ # <h3>Greedy Positive Closure: {@code (...)+}</h3>
45
+ #
46
+ # <embed src="images/PositiveClosureGreedy.svg" type="image/svg+xml"/>
47
+ #
48
+ # <h3>Greedy Optional: {@code (...)?}</h3>
49
+ #
50
+ # <embed src="images/OptionalGreedy.svg" type="image/svg+xml"/>
51
+ #
52
+ # <h2>Non-Greedy Loops</h2>
53
+ #
54
+ # <h3>Non-Greedy Closure: {@code (...)*?}</h3>
55
+ #
56
+ # <embed src="images/ClosureNonGreedy.svg" type="image/svg+xml"/>
57
+ #
58
+ # <h3>Non-Greedy Positive Closure: {@code (...)+?}</h3>
59
+ #
60
+ # <embed src="images/PositiveClosureNonGreedy.svg" type="image/svg+xml"/>
61
+ #
62
+ # <h3>Non-Greedy Optional: {@code (...)??}</h3>
63
+ #
64
+ # <embed src="images/OptionalNonGreedy.svg" type="image/svg+xml"/>
65
+ #
66
+
67
+ from antlr4.atn.Transition import Transition
68
+
69
# Capacity hint for a state's transition list, kept for parity with the Java
# runtime (not referenced in the code visible here — TODO confirm no users).
INITIAL_NUM_TRANSITIONS = 4
70
+
71
class ATNState(object):
    """A node in the ATN graph.

    Identity is the state number: __eq__/__hash__ compare only stateNumber,
    so two states with the same number compare equal regardless of type.
    """
    __slots__ = (
        'atn', 'stateNumber', 'stateType', 'ruleIndex', 'epsilonOnlyTransitions',
        'transitions', 'nextTokenWithinRule',
    )

    # constants for serialization
    INVALID_TYPE = 0
    BASIC = 1
    RULE_START = 2
    BLOCK_START = 3
    PLUS_BLOCK_START = 4
    STAR_BLOCK_START = 5
    TOKEN_START = 6
    RULE_STOP = 7
    BLOCK_END = 8
    STAR_LOOP_BACK = 9
    STAR_LOOP_ENTRY = 10
    PLUS_LOOP_BACK = 11
    LOOP_END = 12

    # human-readable names indexed by the constants above
    serializationNames = [
            "INVALID",
            "BASIC",
            "RULE_START",
            "BLOCK_START",
            "PLUS_BLOCK_START",
            "STAR_BLOCK_START",
            "TOKEN_START",
            "RULE_STOP",
            "BLOCK_END",
            "STAR_LOOP_BACK",
            "STAR_LOOP_ENTRY",
            "PLUS_LOOP_BACK",
            "LOOP_END" ]

    INVALID_STATE_NUMBER = -1

    def __init__(self):
        # Which ATN are we in?
        self.atn = None
        self.stateNumber = ATNState.INVALID_STATE_NUMBER
        self.stateType = None
        self.ruleIndex = 0 # at runtime, we don't have Rule objects
        self.epsilonOnlyTransitions = False
        # Track the transitions emanating from this ATN state.
        self.transitions = []
        # Used to cache lookahead during parsing, not used during construction
        self.nextTokenWithinRule = None

    def __hash__(self):
        return self.stateNumber

    def __eq__(self, other):
        return isinstance(other, ATNState) and self.stateNumber==other.stateNumber

    def onlyHasEpsilonTransitions(self):
        # True while every transition added so far has been an epsilon transition
        return self.epsilonOnlyTransitions

    def isNonGreedyExitState(self):
        return False

    def __str__(self):
        return str(self.stateNumber)

    def addTransition(self, trans:Transition, index:int=-1):
        """Append (or insert at *index*) a transition, keeping the
        epsilonOnlyTransitions flag in sync with the transitions list."""
        if len(self.transitions)==0:
            self.epsilonOnlyTransitions = trans.isEpsilon
        elif self.epsilonOnlyTransitions != trans.isEpsilon:
            self.epsilonOnlyTransitions = False
            # TODO System.err.format(Locale.getDefault(), "ATN state %d has both epsilon and non-epsilon transitions.\n", stateNumber);
        if index==-1:
            self.transitions.append(trans)
        else:
            self.transitions.insert(index, trans)
146
+
147
class BasicState(ATNState):
    """Plain intermediate state with no special role."""

    def __init__(self):
        super().__init__()
        self.stateType = self.BASIC
152
+
153
+
154
class DecisionState(ATNState):
    """Base for states where the prediction algorithm must choose among
    alternatives; carries the decision index and the non-greedy flag."""
    __slots__ = ('decision', 'nonGreedy')
    def __init__(self):
        super().__init__()
        self.decision = -1
        self.nonGreedy = False
160
+
161
# The start of a regular {@code (...)} block.
class BlockStartState(DecisionState):
    """Start of a subrule block; paired with a BlockEndState via endState."""
    __slots__ = 'endState'

    def __init__(self):
        super().__init__()
        self.endState = None
168
+
169
class BasicBlockStartState(BlockStartState):
    """Start of an ordinary (non-loop) block."""

    def __init__(self):
        super().__init__()
        self.stateType = self.BLOCK_START
174
+
175
# Terminal node of a simple {@code (a|b|c)} block.
class BlockEndState(ATNState):
    """End of a block; paired with a BlockStartState via startState."""
    __slots__ = 'startState'

    def __init__(self):
        super().__init__()
        self.stateType = self.BLOCK_END
        self.startState = None
183
+
184
# The last node in the ATN for a rule, unless that rule is the start symbol.
# In that case, there is one transition to EOF. Later, we might encode
# references to all calls to this rule to compute FOLLOW sets for
# error handling.
#
class RuleStopState(ATNState):

    def __init__(self):
        super().__init__()
        self.stateType = self.RULE_STOP
194
+
195
class RuleStartState(ATNState):
    """Entry state of a rule; knows its stop state and whether the rule is a
    left-recursive precedence rule."""
    __slots__ = ('stopState', 'isPrecedenceRule')

    def __init__(self):
        super().__init__()
        self.stateType = self.RULE_START
        self.stopState = None
        self.isPrecedenceRule = False
203
+
204
# Decision state for {@code A+} and {@code (A|B)+}.  It has two transitions:
# one to the loop back to start of the block and one to exit.
#
class PlusLoopbackState(DecisionState):

    def __init__(self):
        super().__init__()
        self.stateType = self.PLUS_LOOP_BACK
212
+
213
# Start of {@code (A|B|...)+} loop. Technically a decision state, but
# we don't use for code generation; somebody might need it, so I'm defining
# it for completeness. In reality, the {@link PlusLoopbackState} node is the
# real decision-making note for {@code A+}.
#
class PlusBlockStartState(BlockStartState):
    __slots__ = 'loopBackState'

    def __init__(self):
        super().__init__()
        self.stateType = self.PLUS_BLOCK_START
        self.loopBackState = None
225
+
226
# The block that begins a closure loop.
class StarBlockStartState(BlockStartState):

    def __init__(self):
        super().__init__()
        self.stateType = self.STAR_BLOCK_START
232
+
233
class StarLoopbackState(ATNState):
    """Loop-back state of a {@code (...)*} closure."""

    def __init__(self):
        super().__init__()
        self.stateType = self.STAR_LOOP_BACK
238
+
239
+
240
class StarLoopEntryState(DecisionState):
    """Entry decision state of a {@code (...)*} closure loop."""
    __slots__ = ('loopBackState', 'isPrecedenceDecision')

    def __init__(self):
        super().__init__()
        self.stateType = self.STAR_LOOP_ENTRY
        self.loopBackState = None
        # Indicates whether this state can benefit from a precedence DFA during SLL decision making.
        self.isPrecedenceDecision = None
249
+
250
# Mark the end of a * or + loop.
class LoopEndState(ATNState):
    __slots__ = 'loopBackState'

    def __init__(self):
        super().__init__()
        self.stateType = self.LOOP_END
        self.loopBackState = None
258
+
259
# The Tokens rule start state linking to each lexer rule start state */
class TokensStartState(DecisionState):

    def __init__(self):
        super().__init__()
        self.stateType = self.TOKEN_START
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ATNType.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+
6
+ from enum import IntEnum
7
+
8
+ # Represents the type of recognizer an ATN applies to.
9
+
10
+ class ATNType(IntEnum):
11
+
12
+ LEXER = 0
13
+ PARSER = 1
14
+
15
+ @classmethod
16
+ def fromOrdinal(cls, i:int):
17
+ return cls._value2member_map_[i]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerATNSimulator.py ADDED
@@ -0,0 +1,570 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # When we hit an accept state in either the DFA or the ATN, we
8
+ # have to notify the character stream to start buffering characters
9
+ # via {@link IntStream#mark} and record the current state. The current sim state
10
+ # includes the current index into the input, the current line,
11
+ # and current character position in that line. Note that the Lexer is
12
+ # tracking the starting line and characterization of the token. These
13
+ # variables track the "state" of the simulator when it hits an accept state.
14
+ #
15
+ # <p>We track these variables separately for the DFA and ATN simulation
16
+ # because the DFA simulation often has to fail over to the ATN
17
+ # simulation. If the ATN simulation fails, we need the DFA to fall
18
+ # back to its previously accepted state, if any. If the ATN succeeds,
19
+ # then the ATN does the accept and the DFA simulator that invoked it
20
+ # can simply return the predicted token type.</p>
21
+ #/
22
+
23
+ from antlr4.PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext
24
+ from antlr4.InputStream import InputStream
25
+ from antlr4.Token import Token
26
+ from antlr4.atn.ATN import ATN
27
+ from antlr4.atn.ATNConfig import LexerATNConfig
28
+ from antlr4.atn.ATNSimulator import ATNSimulator
29
+ from antlr4.atn.ATNConfigSet import ATNConfigSet, OrderedATNConfigSet
30
+ from antlr4.atn.ATNState import RuleStopState, ATNState
31
+ from antlr4.atn.LexerActionExecutor import LexerActionExecutor
32
+ from antlr4.atn.Transition import Transition
33
+ from antlr4.dfa.DFAState import DFAState
34
+ from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException
35
+
36
+ class SimState(object):
37
+ __slots__ = ('index', 'line', 'column', 'dfaState')
38
+
39
+ def __init__(self):
40
+ self.reset()
41
+
42
+ def reset(self):
43
+ self.index = -1
44
+ self.line = 0
45
+ self.column = -1
46
+ self.dfaState = None
47
+
48
+ # need forward declaration
49
+ Lexer = None
50
+ LexerATNSimulator = None
51
+
52
+ class LexerATNSimulator(ATNSimulator):
53
+ __slots__ = (
54
+ 'decisionToDFA', 'recog', 'startIndex', 'line', 'column', 'mode',
55
+ 'DEFAULT_MODE', 'MAX_CHAR_VALUE', 'prevAccept'
56
+ )
57
+
58
+ debug = False
59
+ dfa_debug = False
60
+
61
+ MIN_DFA_EDGE = 0
62
+ MAX_DFA_EDGE = 127 # forces unicode to stay in ATN
63
+
64
+ ERROR = None
65
+
66
+ def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
67
+ super().__init__(atn, sharedContextCache)
68
+ self.decisionToDFA = decisionToDFA
69
+ self.recog = recog
70
+ # The current token's starting index into the character stream.
71
+ # Shared across DFA to ATN simulation in case the ATN fails and the
72
+ # DFA did not have a previous accept state. In this case, we use the
73
+ # ATN-generated exception object.
74
+ self.startIndex = -1
75
+ # line number 1..n within the input#/
76
+ self.line = 1
77
+ # The index of the character relative to the beginning of the line 0..n-1#/
78
+ self.column = 0
79
+ from antlr4.Lexer import Lexer
80
+ self.mode = Lexer.DEFAULT_MODE
81
+ # Cache Lexer properties to avoid further imports
82
+ self.DEFAULT_MODE = Lexer.DEFAULT_MODE
83
+ self.MAX_CHAR_VALUE = Lexer.MAX_CHAR_VALUE
84
+ # Used during DFA/ATN exec to record the most recent accept configuration info
85
+ self.prevAccept = SimState()
86
+
87
+
88
+ def copyState(self, simulator:LexerATNSimulator ):
89
+ self.column = simulator.column
90
+ self.line = simulator.line
91
+ self.mode = simulator.mode
92
+ self.startIndex = simulator.startIndex
93
+
94
+ def match(self, input:InputStream , mode:int):
95
+ self.mode = mode
96
+ mark = input.mark()
97
+ try:
98
+ self.startIndex = input.index
99
+ self.prevAccept.reset()
100
+ dfa = self.decisionToDFA[mode]
101
+ if dfa.s0 is None:
102
+ return self.matchATN(input)
103
+ else:
104
+ return self.execATN(input, dfa.s0)
105
+ finally:
106
+ input.release(mark)
107
+
108
+ def reset(self):
109
+ self.prevAccept.reset()
110
+ self.startIndex = -1
111
+ self.line = 1
112
+ self.column = 0
113
+ self.mode = self.DEFAULT_MODE
114
+
115
+ def matchATN(self, input:InputStream):
116
+ startState = self.atn.modeToStartState[self.mode]
117
+
118
+ if LexerATNSimulator.debug:
119
+ print("matchATN mode " + str(self.mode) + " start: " + str(startState))
120
+
121
+ old_mode = self.mode
122
+ s0_closure = self.computeStartState(input, startState)
123
+ suppressEdge = s0_closure.hasSemanticContext
124
+ s0_closure.hasSemanticContext = False
125
+
126
+ next = self.addDFAState(s0_closure)
127
+ if not suppressEdge:
128
+ self.decisionToDFA[self.mode].s0 = next
129
+
130
+ predict = self.execATN(input, next)
131
+
132
+ if LexerATNSimulator.debug:
133
+ print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))
134
+
135
+ return predict
136
+
137
+ def execATN(self, input:InputStream, ds0:DFAState):
138
+ if LexerATNSimulator.debug:
139
+ print("start state closure=" + str(ds0.configs))
140
+
141
+ if ds0.isAcceptState:
142
+ # allow zero-length tokens
143
+ self.captureSimState(self.prevAccept, input, ds0)
144
+
145
+ t = input.LA(1)
146
+ s = ds0 # s is current/from DFA state
147
+
148
+ while True: # while more work
149
+ if LexerATNSimulator.debug:
150
+ print("execATN loop starting closure:", str(s.configs))
151
+
152
+ # As we move src->trg, src->trg, we keep track of the previous trg to
153
+ # avoid looking up the DFA state again, which is expensive.
154
+ # If the previous target was already part of the DFA, we might
155
+ # be able to avoid doing a reach operation upon t. If s!=null,
156
+ # it means that semantic predicates didn't prevent us from
157
+ # creating a DFA state. Once we know s!=null, we check to see if
158
+ # the DFA state has an edge already for t. If so, we can just reuse
159
+ # it's configuration set; there's no point in re-computing it.
160
+ # This is kind of like doing DFA simulation within the ATN
161
+ # simulation because DFA simulation is really just a way to avoid
162
+ # computing reach/closure sets. Technically, once we know that
163
+ # we have a previously added DFA state, we could jump over to
164
+ # the DFA simulator. But, that would mean popping back and forth
165
+ # a lot and making things more complicated algorithmically.
166
+ # This optimization makes a lot of sense for loops within DFA.
167
+ # A character will take us back to an existing DFA state
168
+ # that already has lots of edges out of it. e.g., .* in comments.
169
+ # print("Target for:" + str(s) + " and:" + str(t))
170
+ target = self.getExistingTargetState(s, t)
171
+ # print("Existing:" + str(target))
172
+ if target is None:
173
+ target = self.computeTargetState(input, s, t)
174
+ # print("Computed:" + str(target))
175
+
176
+ if target == self.ERROR:
177
+ break
178
+
179
+ # If this is a consumable input element, make sure to consume before
180
+ # capturing the accept state so the input index, line, and char
181
+ # position accurately reflect the state of the interpreter at the
182
+ # end of the token.
183
+ if t != Token.EOF:
184
+ self.consume(input)
185
+
186
+ if target.isAcceptState:
187
+ self.captureSimState(self.prevAccept, input, target)
188
+ if t == Token.EOF:
189
+ break
190
+
191
+ t = input.LA(1)
192
+
193
+ s = target # flip; current DFA target becomes new src/from state
194
+
195
+ return self.failOrAccept(self.prevAccept, input, s.configs, t)
196
+
197
+ # Get an existing target state for an edge in the DFA. If the target state
198
+ # for the edge has not yet been computed or is otherwise not available,
199
+ # this method returns {@code null}.
200
+ #
201
+ # @param s The current DFA state
202
+ # @param t The next input symbol
203
+ # @return The existing target DFA state for the given input symbol
204
+ # {@code t}, or {@code null} if the target state for this edge is not
205
+ # already cached
206
+ def getExistingTargetState(self, s:DFAState, t:int):
207
+ if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE:
208
+ return None
209
+
210
+ target = s.edges[t - self.MIN_DFA_EDGE]
211
+ if LexerATNSimulator.debug and target is not None:
212
+ print("reuse state", str(s.stateNumber), "edge to", str(target.stateNumber))
213
+
214
+ return target
215
+
216
+ # Compute a target state for an edge in the DFA, and attempt to add the
217
+ # computed state and corresponding edge to the DFA.
218
+ #
219
+ # @param input The input stream
220
+ # @param s The current DFA state
221
+ # @param t The next input symbol
222
+ #
223
+ # @return The computed target DFA state for the given input symbol
224
+ # {@code t}. If {@code t} does not lead to a valid DFA state, this method
225
+ # returns {@link #ERROR}.
226
+ def computeTargetState(self, input:InputStream, s:DFAState, t:int):
227
+ reach = OrderedATNConfigSet()
228
+
229
+ # if we don't find an existing DFA state
230
+ # Fill reach starting from closure, following t transitions
231
+ self.getReachableConfigSet(input, s.configs, reach, t)
232
+
233
+ if len(reach)==0: # we got nowhere on t from s
234
+ if not reach.hasSemanticContext:
235
+ # we got nowhere on t, don't throw out this knowledge; it'd
236
+ # cause a failover from DFA later.
237
+ self. addDFAEdge(s, t, self.ERROR)
238
+
239
+ # stop when we can't match any more char
240
+ return self.ERROR
241
+
242
+ # Add an edge from s to target DFA found/created for reach
243
+ return self.addDFAEdge(s, t, cfgs=reach)
244
+
245
+ def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int):
246
+ if self.prevAccept.dfaState is not None:
247
+ lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor
248
+ self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
249
+ return prevAccept.dfaState.prediction
250
+ else:
251
+ # if no accept and EOF is first char, return EOF
252
+ if t==Token.EOF and input.index==self.startIndex:
253
+ return Token.EOF
254
+ raise LexerNoViableAltException(self.recog, input, self.startIndex, reach)
255
+
256
+ # Given a starting configuration set, figure out all ATN configurations
257
+ # we can reach upon input {@code t}. Parameter {@code reach} is a return
258
+ # parameter.
259
+ def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int):
260
+ # this is used to skip processing for configs which have a lower priority
261
+ # than a config that already reached an accept state for the same rule
262
+ skipAlt = ATN.INVALID_ALT_NUMBER
263
+ for cfg in closure:
264
+ currentAltReachedAcceptState = ( cfg.alt == skipAlt )
265
+ if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
266
+ continue
267
+
268
+ if LexerATNSimulator.debug:
269
+ print("testing", self.getTokenName(t), "at", str(cfg))
270
+
271
+ for trans in cfg.state.transitions: # for each transition
272
+ target = self.getReachableTarget(trans, t)
273
+ if target is not None:
274
+ lexerActionExecutor = cfg.lexerActionExecutor
275
+ if lexerActionExecutor is not None:
276
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex)
277
+
278
+ treatEofAsEpsilon = (t == Token.EOF)
279
+ config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg)
280
+ if self.closure(input, config, reach, currentAltReachedAcceptState, True, treatEofAsEpsilon):
281
+ # any remaining configs for this alt have a lower priority than
282
+ # the one that just reached an accept state.
283
+ skipAlt = cfg.alt
284
+
285
+ def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
286
+ if LexerATNSimulator.debug:
287
+ print("ACTION", lexerActionExecutor)
288
+
289
+ # seek to after last char in token
290
+ input.seek(index)
291
+ self.line = line
292
+ self.column = charPos
293
+
294
+ if lexerActionExecutor is not None and self.recog is not None:
295
+ lexerActionExecutor.execute(self.recog, input, startIndex)
296
+
297
+ def getReachableTarget(self, trans:Transition, t:int):
298
+ if trans.matches(t, 0, self.MAX_CHAR_VALUE):
299
+ return trans.target
300
+ else:
301
+ return None
302
+
303
+ def computeStartState(self, input:InputStream, p:ATNState):
304
+ initialContext = PredictionContext.EMPTY
305
+ configs = OrderedATNConfigSet()
306
+ for i in range(0,len(p.transitions)):
307
+ target = p.transitions[i].target
308
+ c = LexerATNConfig(state=target, alt=i+1, context=initialContext)
309
+ self.closure(input, c, configs, False, False, False)
310
+ return configs
311
+
312
+ # Since the alternatives within any lexer decision are ordered by
313
+ # preference, this method stops pursuing the closure as soon as an accept
314
+ # state is reached. After the first accept state is reached by depth-first
315
+ # search from {@code config}, all other (potentially reachable) states for
316
+ # this rule would have a lower priority.
317
+ #
318
+ # @return {@code true} if an accept state is reached, otherwise
319
+ # {@code false}.
320
+ def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
321
+ speculative:bool, treatEofAsEpsilon:bool):
322
+ if LexerATNSimulator.debug:
323
+ print("closure(" + str(config) + ")")
324
+
325
+ if isinstance( config.state, RuleStopState ):
326
+ if LexerATNSimulator.debug:
327
+ if self.recog is not None:
328
+ print("closure at", self.recog.symbolicNames[config.state.ruleIndex], "rule stop", str(config))
329
+ else:
330
+ print("closure at rule stop", str(config))
331
+
332
+ if config.context is None or config.context.hasEmptyPath():
333
+ if config.context is None or config.context.isEmpty():
334
+ configs.add(config)
335
+ return True
336
+ else:
337
+ configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY))
338
+ currentAltReachedAcceptState = True
339
+
340
+ if config.context is not None and not config.context.isEmpty():
341
+ for i in range(0,len(config.context)):
342
+ if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE:
343
+ newContext = config.context.getParent(i) # "pop" return state
344
+ returnState = self.atn.states[config.context.getReturnState(i)]
345
+ c = LexerATNConfig(state=returnState, config=config, context=newContext)
346
+ currentAltReachedAcceptState = self.closure(input, c, configs,
347
+ currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
348
+
349
+ return currentAltReachedAcceptState
350
+
351
+ # optimization
352
+ if not config.state.epsilonOnlyTransitions:
353
+ if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision:
354
+ configs.add(config)
355
+
356
+ for t in config.state.transitions:
357
+ c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon)
358
+ if c is not None:
359
+ currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
360
+
361
+ return currentAltReachedAcceptState
362
+
363
+ # side-effect: can alter configs.hasSemanticContext
364
+ def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, configs:ATNConfigSet,
365
+ speculative:bool, treatEofAsEpsilon:bool):
366
+ c = None
367
+ if t.serializationType==Transition.RULE:
368
+ newContext = SingletonPredictionContext.create(config.context, t.followState.stateNumber)
369
+ c = LexerATNConfig(state=t.target, config=config, context=newContext)
370
+
371
+ elif t.serializationType==Transition.PRECEDENCE:
372
+ raise UnsupportedOperationException("Precedence predicates are not supported in lexers.")
373
+
374
+ elif t.serializationType==Transition.PREDICATE:
375
+ # Track traversing semantic predicates. If we traverse,
376
+ # we cannot add a DFA state for this "reach" computation
377
+ # because the DFA would not test the predicate again in the
378
+ # future. Rather than creating collections of semantic predicates
379
+ # like v3 and testing them on prediction, v4 will test them on the
380
+ # fly all the time using the ATN not the DFA. This is slower but
381
+ # semantically it's not used that often. One of the key elements to
382
+ # this predicate mechanism is not adding DFA states that see
383
+ # predicates immediately afterwards in the ATN. For example,
384
+
385
+ # a : ID {p1}? | ID {p2}? ;
386
+
387
+ # should create the start state for rule 'a' (to save start state
388
+ # competition), but should not create target of ID state. The
389
+ # collection of ATN states the following ID references includes
390
+ # states reached by traversing predicates. Since this is when we
391
+ # test them, we cannot cash the DFA state target of ID.
392
+
393
+ if LexerATNSimulator.debug:
394
+ print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
395
+ configs.hasSemanticContext = True
396
+ if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
397
+ c = LexerATNConfig(state=t.target, config=config)
398
+
399
+ elif t.serializationType==Transition.ACTION:
400
+ if config.context is None or config.context.hasEmptyPath():
401
+ # execute actions anywhere in the start rule for a token.
402
+ #
403
+ # TODO: if the entry rule is invoked recursively, some
404
+ # actions may be executed during the recursive call. The
405
+ # problem can appear when hasEmptyPath() is true but
406
+ # isEmpty() is false. In this case, the config needs to be
407
+ # split into two contexts - one with just the empty path
408
+ # and another with everything but the empty path.
409
+ # Unfortunately, the current algorithm does not allow
410
+ # getEpsilonTarget to return two configurations, so
411
+ # additional modifications are needed before we can support
412
+ # the split operation.
413
+ lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor,
414
+ self.atn.lexerActions[t.actionIndex])
415
+ c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor)
416
+
417
+ else:
418
+ # ignore actions in referenced rules
419
+ c = LexerATNConfig(state=t.target, config=config)
420
+
421
+ elif t.serializationType==Transition.EPSILON:
422
+ c = LexerATNConfig(state=t.target, config=config)
423
+
424
+ elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]:
425
+ if treatEofAsEpsilon:
426
+ if t.matches(Token.EOF, 0, self.MAX_CHAR_VALUE):
427
+ c = LexerATNConfig(state=t.target, config=config)
428
+
429
+ return c
430
+
431
+ # Evaluate a predicate specified in the lexer.
432
+ #
433
+ # <p>If {@code speculative} is {@code true}, this method was called before
434
+ # {@link #consume} for the matched character. This method should call
435
+ # {@link #consume} before evaluating the predicate to ensure position
436
+ # sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
437
+ # and {@link Lexer#getcolumn}, properly reflect the current
438
+ # lexer state. This method should restore {@code input} and the simulator
439
+ # to the original state before returning (i.e. undo the actions made by the
440
+ # call to {@link #consume}.</p>
441
+ #
442
+ # @param input The input stream.
443
+ # @param ruleIndex The rule containing the predicate.
444
+ # @param predIndex The index of the predicate within the rule.
445
+ # @param speculative {@code true} if the current index in {@code input} is
446
+ # one character before the predicate's location.
447
+ #
448
+ # @return {@code true} if the specified predicate evaluates to
449
+ # {@code true}.
450
+ #/
451
+ def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool):
452
+ # assume true if no recognizer was provided
453
+ if self.recog is None:
454
+ return True
455
+
456
+ if not speculative:
457
+ return self.recog.sempred(None, ruleIndex, predIndex)
458
+
459
+ savedcolumn = self.column
460
+ savedLine = self.line
461
+ index = input.index
462
+ marker = input.mark()
463
+ try:
464
+ self.consume(input)
465
+ return self.recog.sempred(None, ruleIndex, predIndex)
466
+ finally:
467
+ self.column = savedcolumn
468
+ self.line = savedLine
469
+ input.seek(index)
470
+ input.release(marker)
471
+
472
+ def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState):
473
+ settings.index = input.index
474
+ settings.line = self.line
475
+ settings.column = self.column
476
+ settings.dfaState = dfaState
477
+
478
+ def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState:
479
+
480
+ if to is None and cfgs is not None:
481
+ # leading to this call, ATNConfigSet.hasSemanticContext is used as a
482
+ # marker indicating dynamic predicate evaluation makes this edge
483
+ # dependent on the specific input sequence, so the static edge in the
484
+ # DFA should be omitted. The target DFAState is still created since
485
+ # execATN has the ability to resynchronize with the DFA state cache
486
+ # following the predicate evaluation step.
487
+ #
488
+ # TJP notes: next time through the DFA, we see a pred again and eval.
489
+ # If that gets us to a previously created (but dangling) DFA
490
+ # state, we can continue in pure DFA mode from there.
491
+ #/
492
+ suppressEdge = cfgs.hasSemanticContext
493
+ cfgs.hasSemanticContext = False
494
+
495
+ to = self.addDFAState(cfgs)
496
+
497
+ if suppressEdge:
498
+ return to
499
+
500
+ # add the edge
501
+ if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE:
502
+ # Only track edges within the DFA bounds
503
+ return to
504
+
505
+ if LexerATNSimulator.debug:
506
+ print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
507
+
508
+ if from_.edges is None:
509
+ # make room for tokens 1..n and -1 masquerading as index 0
510
+ from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1)
511
+
512
+ from_.edges[tk - self.MIN_DFA_EDGE] = to # connect
513
+
514
+ return to
515
+
516
+
517
+ # Add a new DFA state if there isn't one with this set of
518
+ # configurations already. This method also detects the first
519
+ # configuration containing an ATN rule stop state. Later, when
520
+ # traversing the DFA, we will know which rule to accept.
521
+ def addDFAState(self, configs:ATNConfigSet) -> DFAState:
522
+
523
+ proposed = DFAState(configs=configs)
524
+ firstConfigWithRuleStopState = next((cfg for cfg in configs if isinstance(cfg.state, RuleStopState)), None)
525
+
526
+ if firstConfigWithRuleStopState is not None:
527
+ proposed.isAcceptState = True
528
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
529
+ proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]
530
+
531
+ dfa = self.decisionToDFA[self.mode]
532
+ existing = dfa.states.get(proposed, None)
533
+ if existing is not None:
534
+ return existing
535
+
536
+ newState = proposed
537
+
538
+ newState.stateNumber = len(dfa.states)
539
+ configs.setReadonly(True)
540
+ newState.configs = configs
541
+ dfa.states[newState] = newState
542
+ return newState
543
+
544
+ def getDFA(self, mode:int):
545
+ return self.decisionToDFA[mode]
546
+
547
+ # Get the text matched so far for the current token.
548
+ def getText(self, input:InputStream):
549
+ # index is first lookahead char, don't include.
550
+ return input.getText(self.startIndex, input.index-1)
551
+
552
+ def consume(self, input:InputStream):
553
+ curChar = input.LA(1)
554
+ if curChar==ord('\n'):
555
+ self.line += 1
556
+ self.column = 0
557
+ else:
558
+ self.column += 1
559
+ input.consume()
560
+
561
+ def getTokenName(self, t:int):
562
+ if t==-1:
563
+ return "EOF"
564
+ else:
565
+ return "'" + chr(t) + "'"
566
+
567
+
568
+ LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())
569
+
570
+ del Lexer
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerAction.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ from enum import IntEnum
8
+
9
+ # need forward declaration
10
+ Lexer = None
11
+
12
+
13
+ class LexerActionType(IntEnum):
14
+
15
+ CHANNEL = 0 #The type of a {@link LexerChannelAction} action.
16
+ CUSTOM = 1 #The type of a {@link LexerCustomAction} action.
17
+ MODE = 2 #The type of a {@link LexerModeAction} action.
18
+ MORE = 3 #The type of a {@link LexerMoreAction} action.
19
+ POP_MODE = 4 #The type of a {@link LexerPopModeAction} action.
20
+ PUSH_MODE = 5 #The type of a {@link LexerPushModeAction} action.
21
+ SKIP = 6 #The type of a {@link LexerSkipAction} action.
22
+ TYPE = 7 #The type of a {@link LexerTypeAction} action.
23
+
24
+ class LexerAction(object):
25
+ __slots__ = ('actionType', 'isPositionDependent')
26
+
27
+ def __init__(self, action:LexerActionType):
28
+ self.actionType = action
29
+ self.isPositionDependent = False
30
+
31
+ def __hash__(self):
32
+ return hash(self.actionType)
33
+
34
+ def __eq__(self, other):
35
+ return self is other
36
+
37
+
38
+ #
39
+ # Implements the {@code skip} lexer action by calling {@link Lexer#skip}.
40
+ #
41
+ # <p>The {@code skip} command does not have any parameters, so this action is
42
+ # implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
43
+ class LexerSkipAction(LexerAction):
44
+
45
+ # Provides a singleton instance of this parameterless lexer action.
46
+ INSTANCE = None
47
+
48
+ def __init__(self):
49
+ super().__init__(LexerActionType.SKIP)
50
+
51
+ def execute(self, lexer:Lexer):
52
+ lexer.skip()
53
+
54
+ def __str__(self):
55
+ return "skip"
56
+
57
+ LexerSkipAction.INSTANCE = LexerSkipAction()
58
+
59
+ # Implements the {@code type} lexer action by calling {@link Lexer#setType}
60
+ # with the assigned type.
61
+ class LexerTypeAction(LexerAction):
62
+ __slots__ = 'type'
63
+
64
+ def __init__(self, type:int):
65
+ super().__init__(LexerActionType.TYPE)
66
+ self.type = type
67
+
68
+ def execute(self, lexer:Lexer):
69
+ lexer.type = self.type
70
+
71
+ def __hash__(self):
72
+ return hash((self.actionType, self.type))
73
+
74
+ def __eq__(self, other):
75
+ if self is other:
76
+ return True
77
+ elif not isinstance(other, LexerTypeAction):
78
+ return False
79
+ else:
80
+ return self.type == other.type
81
+
82
+ def __str__(self):
83
+ return "type(" + str(self.type) + ")"
84
+
85
+
86
+ # Implements the {@code pushMode} lexer action by calling
87
+ # {@link Lexer#pushMode} with the assigned mode.
88
+ class LexerPushModeAction(LexerAction):
89
+ __slots__ = 'mode'
90
+
91
+ def __init__(self, mode:int):
92
+ super().__init__(LexerActionType.PUSH_MODE)
93
+ self.mode = mode
94
+
95
+ # <p>This action is implemented by calling {@link Lexer#pushMode} with the
96
+ # value provided by {@link #getMode}.</p>
97
+ def execute(self, lexer:Lexer):
98
+ lexer.pushMode(self.mode)
99
+
100
+ def __hash__(self):
101
+ return hash((self.actionType, self.mode))
102
+
103
+ def __eq__(self, other):
104
+ if self is other:
105
+ return True
106
+ elif not isinstance(other, LexerPushModeAction):
107
+ return False
108
+ else:
109
+ return self.mode == other.mode
110
+
111
+ def __str__(self):
112
+ return "pushMode(" + str(self.mode) + ")"
113
+
114
+
115
+ # Implements the {@code popMode} lexer action by calling {@link Lexer#popMode}.
116
+ #
117
+ # <p>The {@code popMode} command does not have any parameters, so this action is
118
+ # implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
119
+ class LexerPopModeAction(LexerAction):
120
+
121
+ INSTANCE = None
122
+
123
+ def __init__(self):
124
+ super().__init__(LexerActionType.POP_MODE)
125
+
126
+ # <p>This action is implemented by calling {@link Lexer#popMode}.</p>
127
+ def execute(self, lexer:Lexer):
128
+ lexer.popMode()
129
+
130
+ def __str__(self):
131
+ return "popMode"
132
+
133
+ LexerPopModeAction.INSTANCE = LexerPopModeAction()
134
+
135
+ # Implements the {@code more} lexer action by calling {@link Lexer#more}.
136
+ #
137
+ # <p>The {@code more} command does not have any parameters, so this action is
138
+ # implemented as a singleton instance exposed by {@link #INSTANCE}.</p>
139
+ class LexerMoreAction(LexerAction):
140
+
141
+ INSTANCE = None
142
+
143
+ def __init__(self):
144
+ super().__init__(LexerActionType.MORE)
145
+
146
+ # <p>This action is implemented by calling {@link Lexer#popMode}.</p>
147
+ def execute(self, lexer:Lexer):
148
+ lexer.more()
149
+
150
+ def __str__(self):
151
+ return "more"
152
+
153
+ LexerMoreAction.INSTANCE = LexerMoreAction()
154
+
155
# Implements the {@code mode} lexer action by calling {@link Lexer#mode} with
# the assigned mode.
class LexerModeAction(LexerAction):
    __slots__ = 'mode'

    def __init__(self, mode:int):
        super().__init__(LexerActionType.MODE)
        self.mode = mode

    # Switches the lexer into the mode captured at construction time by
    # calling {@link Lexer#mode} with the value provided by {@link #getMode}.
    def execute(self, lexer:Lexer):
        lexer.mode(self.mode)

    def __hash__(self):
        return hash((self.actionType, self.mode))

    def __eq__(self, other):
        return self is other or (
            isinstance(other, LexerModeAction) and self.mode == other.mode)

    def __str__(self):
        return f"mode({self.mode})"
182
+
183
# Executes a custom lexer action by calling {@link Recognizer#action} with the
# rule and action indexes assigned to the custom action. The implementation of
# a custom action is added to the generated code for the lexer in an override
# of {@link Recognizer#action} when the grammar is compiled.
#
# <p>This class may represent embedded actions created with the <code>{...}</code>
# syntax in ANTLR 4, as well as actions created for lexer commands where the
# command argument could not be evaluated when the grammar was compiled.</p>
class LexerCustomAction(LexerAction):
    __slots__ = ('ruleIndex', 'actionIndex')

    # Constructs a custom lexer action with the specified rule and action
    # indexes.
    #
    # @param ruleIndex The rule index to use for calls to
    # {@link Recognizer#action}.
    # @param actionIndex The action index to use for calls to
    # {@link Recognizer#action}.
    #/
    def __init__(self, ruleIndex:int, actionIndex:int):
        super().__init__(LexerActionType.CUSTOM)
        self.ruleIndex = ruleIndex
        self.actionIndex = actionIndex
        # Custom actions can read the input position, so they must be
        # re-anchored when not at the end of a token.
        self.isPositionDependent = True

    # Dispatches to the generated lexer's override of {@link Lexer#action}
    # using the stored rule and action indexes.
    def execute(self, lexer:Lexer):
        lexer.action(None, self.ruleIndex, self.actionIndex)

    def __hash__(self):
        return hash((self.actionType, self.ruleIndex, self.actionIndex))

    def __eq__(self, other):
        return self is other or (
            isinstance(other, LexerCustomAction)
            and self.ruleIndex == other.ruleIndex
            and self.actionIndex == other.actionIndex)
224
+
225
# Implements the {@code channel} lexer action by calling
# {@link Lexer#setChannel} with the assigned channel.
class LexerChannelAction(LexerAction):
    __slots__ = 'channel'

    # Constructs a new {@code channel} action with the specified channel value.
    # @param channel The channel value to pass to {@link Lexer#setChannel}.
    def __init__(self, channel:int):
        super().__init__(LexerActionType.CHANNEL)
        self.channel = channel

    # Assigns the stored channel directly to the lexer's {@code _channel}
    # field (the value exposed via {@link #getChannel}).
    def execute(self, lexer:Lexer):
        lexer._channel = self.channel

    def __hash__(self):
        return hash((self.actionType, self.channel))

    def __eq__(self, other):
        return self is other or (
            isinstance(other, LexerChannelAction) and self.channel == other.channel)

    def __str__(self):
        return f"channel({self.channel})"
254
+
255
# This implementation of {@link LexerAction} is used for tracking input offsets
# for position-dependent actions within a {@link LexerActionExecutor}.
#
# <p>This action is not serialized as part of the ATN, and is only required for
# position-dependent lexer actions which appear at a location other than the
# end of a rule. For more information about DFA optimizations employed for
# lexer actions, see {@link LexerActionExecutor#append} and
# {@link LexerActionExecutor#fixOffsetBeforeMatch}.</p>
class LexerIndexedCustomAction(LexerAction):
    __slots__ = ('offset', 'action')

    # Constructs a new indexed custom action by associating a character offset
    # with a {@link LexerAction}.
    #
    # <p>Note: This class is only required for lexer actions for which
    # {@link LexerAction#isPositionDependent} returns {@code true}.</p>
    #
    # @param offset The offset into the input {@link CharStream}, relative to
    # the token start index, at which the specified lexer action should be
    # executed.
    # @param action The lexer action to execute at a particular offset in the
    # input {@link CharStream}.
    def __init__(self, offset:int, action:LexerAction):
        # Mirror the wrapped action's type so type-based dispatch still works.
        super().__init__(action.actionType)
        self.offset = offset
        self.action = action
        self.isPositionDependent = True

    # Delegates to the wrapped action's {@code execute}; the input stream
    # position is assumed to have been set by the calling code.
    def execute(self, lexer:Lexer):
        self.action.execute(lexer)

    def __hash__(self):
        return hash((self.actionType, self.offset, self.action))

    def __eq__(self, other):
        return self is other or (
            isinstance(other, LexerIndexedCustomAction)
            and self.offset == other.offset
            and self.action == other.action)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/LexerActionExecutor.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # Represents an executor for a sequence of lexer actions which traversed during
8
+ # the matching operation of a lexer rule (token).
9
+ #
10
+ # <p>The executor tracks position information for position-dependent lexer actions
11
+ # efficiently, ensuring that actions appearing only at the end of the rule do
12
+ # not cause bloating of the {@link DFA} created for the lexer.</p>
13
+
14
+
15
+ from antlr4.InputStream import InputStream
16
+ from antlr4.atn.LexerAction import LexerAction, LexerIndexedCustomAction
17
+
18
+ # need a forward declaration
19
+ Lexer = None
20
+ LexerActionExecutor = None
21
+
22
class LexerActionExecutor(object):
    # Executes a sequence of {@link LexerAction} objects recorded while the
    # lexer matched a token. Position information for position-dependent
    # actions is tracked so that actions appearing only at the end of a rule
    # do not bloat the {@link DFA} created for the lexer.
    __slots__ = ('lexerActions', 'hashCode')

    def __init__(self, lexerActions:list=None):
        # @param lexerActions The actions to execute, in order. ``None``
        # (the default) means no actions; the ``None`` sentinel replaces the
        # original shared mutable default argument ``list()``.
        self.lexerActions = [] if lexerActions is None else lexerActions
        # Caches the result of {@link #hashCode} since the hash code is an
        # element of the performance-critical {@link LexerATNConfig#hashCode}
        # operation.
        self.hashCode = hash("".join([str(la) for la in self.lexerActions]))


    # Creates a {@link LexerActionExecutor} which executes the actions for
    # the input {@code lexerActionExecutor} followed by a specified
    # {@code lexerAction}.
    #
    # @param lexerActionExecutor The executor for actions already traversed by
    # the lexer while matching a token within a particular
    # {@link LexerATNConfig}. If this is {@code None}, the method behaves as
    # though it were an empty executor.
    # @param lexerAction The lexer action to execute after the actions
    # specified in {@code lexerActionExecutor}.
    #
    # @return A {@link LexerActionExecutor} for executing the combined actions
    # of {@code lexerActionExecutor} and {@code lexerAction}.
    @staticmethod
    def append(lexerActionExecutor:'LexerActionExecutor', lexerAction:'LexerAction'):
        if lexerActionExecutor is None:
            return LexerActionExecutor([ lexerAction ])

        lexerActions = lexerActionExecutor.lexerActions + [ lexerAction ]
        return LexerActionExecutor(lexerActions)

    # Creates a {@link LexerActionExecutor} which encodes the current offset
    # for position-dependent lexer actions.
    #
    # <p>Normally, when the executor encounters lexer actions where
    # {@link LexerAction#isPositionDependent} returns {@code true}, it calls
    # {@link IntStream#seek} on the input {@link CharStream} to set the input
    # position to the <em>end</em> of the current token. This behavior provides
    # for efficient DFA representation of lexer actions which appear at the end
    # of a lexer rule, even when the lexer rule matches a variable number of
    # characters.</p>
    #
    # <p>Prior to traversing a match transition in the ATN, the current offset
    # from the token start index is assigned to all position-dependent lexer
    # actions which have not already been assigned a fixed offset. By storing
    # the offsets relative to the token start index, the DFA representation of
    # lexer actions which appear in the middle of tokens remains efficient due
    # to sharing among tokens of the same length, regardless of their absolute
    # position in the input stream.</p>
    #
    # <p>If the current executor already has offsets assigned to all
    # position-dependent lexer actions, the method returns {@code this}.</p>
    #
    # @param offset The current offset to assign to all position-dependent
    # lexer actions which do not already have offsets assigned.
    #
    # @return A {@link LexerActionExecutor} which stores input stream offsets
    # for all position-dependent lexer actions.
    #/
    def fixOffsetBeforeMatch(self, offset:int):
        updatedLexerActions = None
        for i in range(0, len(self.lexerActions)):
            if self.lexerActions[i].isPositionDependent and not isinstance(self.lexerActions[i], LexerIndexedCustomAction):
                if updatedLexerActions is None:
                    # Copy lazily: most executors need no rewriting.
                    updatedLexerActions = [ la for la in self.lexerActions ]
                updatedLexerActions[i] = LexerIndexedCustomAction(offset, self.lexerActions[i])

        if updatedLexerActions is None:
            return self
        else:
            return LexerActionExecutor(updatedLexerActions)


    # Execute the actions encapsulated by this executor within the context of a
    # particular {@link Lexer}.
    #
    # <p>This method calls {@link IntStream#seek} to set the position of the
    # {@code input} {@link CharStream} prior to calling
    # {@link LexerAction#execute} on a position-dependent action. Before the
    # method returns, the input position will be restored to the same position
    # it was in when the method was invoked.</p>
    #
    # @param lexer The lexer instance.
    # @param input The input stream which is the source for the current token.
    # When this method is called, the current {@link IntStream#index} for
    # {@code input} should be the start of the following token, i.e. 1
    # character past the end of the current token.
    # @param startIndex The token start index. This value may be passed to
    # {@link IntStream#seek} to set the {@code input} position to the beginning
    # of the token.
    #/
    def execute(self, lexer:'Lexer', input:'InputStream', startIndex:int):
        requiresSeek = False
        stopIndex = input.index
        try:
            for lexerAction in self.lexerActions:
                if isinstance(lexerAction, LexerIndexedCustomAction):
                    offset = lexerAction.offset
                    input.seek(startIndex + offset)
                    lexerAction = lexerAction.action
                    requiresSeek = (startIndex + offset) != stopIndex
                elif lexerAction.isPositionDependent:
                    input.seek(stopIndex)
                    requiresSeek = False
                lexerAction.execute(lexer)
        finally:
            # Restore the caller's stream position if an indexed action moved it.
            if requiresSeek:
                input.seek(stopIndex)

    def __hash__(self):
        return self.hashCode

    def __eq__(self, other):
        if self is other:
            return True
        elif not isinstance(other, LexerActionExecutor):
            return False
        else:
            return self.hashCode == other.hashCode \
                and self.lexerActions == other.lexerActions
142
+
143
+ del Lexer
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/ParserATNSimulator.py ADDED
@@ -0,0 +1,1649 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
9
+ #
10
+ # <p>
11
+ # The basic complexity of the adaptive strategy makes it harder to understand.
12
+ # We begin with ATN simulation to build paths in a DFA. Subsequent prediction
13
+ # requests go through the DFA first. If they reach a state without an edge for
14
+ # the current symbol, the algorithm fails over to the ATN simulation to
15
+ # complete the DFA path for the current input (until it finds a conflict state
16
+ # or uniquely predicting state).</p>
17
+ #
18
+ # <p>
19
+ # All of that is done without using the outer context because we want to create
20
+ # a DFA that is not dependent upon the rule invocation stack when we do a
21
+ # prediction. One DFA works in all contexts. We avoid using context not
22
+ # necessarily because it's slower, although it can be, but because of the DFA
23
+ # caching problem. The closure routine only considers the rule invocation stack
24
+ # created during prediction beginning in the decision rule. For example, if
25
+ # prediction occurs without invoking another rule's ATN, there are no context
26
+ # stacks in the configurations. When lack of context leads to a conflict, we
27
+ # don't know if it's an ambiguity or a weakness in the strong LL(*) parsing
28
+ # strategy (versus full LL(*)).</p>
29
+ #
30
+ # <p>
31
+ # When SLL yields a configuration set with conflict, we rewind the input and
32
+ # retry the ATN simulation, this time using full outer context without adding
33
+ # to the DFA. Configuration context stacks will be the full invocation stacks
34
+ # from the start rule. If we get a conflict using full context, then we can
35
+ # definitively say we have a true ambiguity for that input sequence. If we
36
+ # don't get a conflict, it implies that the decision is sensitive to the outer
37
+ # context. (It is not context-sensitive in the sense of context-sensitive
38
+ # grammars.)</p>
39
+ #
40
+ # <p>
41
+ # The next time we reach this DFA state with an SLL conflict, through DFA
42
+ # simulation, we will again retry the ATN simulation using full context mode.
43
+ # This is slow because we can't save the results and have to "interpret" the
44
+ # ATN each time we get that input.</p>
45
+ #
46
+ # <p>
47
+ # <strong>CACHING FULL CONTEXT PREDICTIONS</strong></p>
48
+ #
49
+ # <p>
50
+ # We could cache results from full context to predicted alternative easily and
51
+ # that saves a lot of time but doesn't work in presence of predicates. The set
52
+ # of visible predicates from the ATN start state changes depending on the
53
+ # context, because closure can fall off the end of a rule. I tried to cache
54
+ # tuples (stack context, semantic context, predicted alt) but it was slower
55
+ # than interpreting and much more complicated. Also required a huge amount of
56
+ # memory. The goal is not to create the world's fastest parser anyway. I'd like
57
+ # to keep this algorithm simple. By launching multiple threads, we can improve
58
+ # the speed of parsing across a large number of files.</p>
59
+ #
60
+ # <p>
61
+ # There is no strict ordering between the amount of input used by SLL vs LL,
62
+ # which makes it really hard to build a cache for full context. Let's say that
63
+ # we have input A B C that leads to an SLL conflict with full context X. That
64
+ # implies that using X we might only use A B but we could also use A B C D to
65
+ # resolve conflict. Input A B C D could predict alternative 1 in one position
66
+ # in the input and A B C E could predict alternative 2 in another position in
67
+ # input. The conflicting SLL configurations could still be non-unique in the
68
+ # full context prediction, which would lead us to requiring more input than the
69
+ # original A B C. To make a prediction cache work, we have to track the exact
70
+ # input used during the previous prediction. That amounts to a cache that maps
71
+ # X to a specific DFA for that context.</p>
72
+ #
73
+ # <p>
74
+ # Something should be done for left-recursive expression predictions. They are
75
+ # likely LL(1) + pred eval. Easier to do the whole SLL unless error and retry
76
+ # with full LL thing Sam does.</p>
77
+ #
78
+ # <p>
79
+ # <strong>AVOIDING FULL CONTEXT PREDICTION</strong></p>
80
+ #
81
+ # <p>
82
+ # We avoid doing full context retry when the outer context is empty, we did not
83
+ # dip into the outer context by falling off the end of the decision state rule,
84
+ # or when we force SLL mode.</p>
85
+ #
86
+ # <p>
87
+ # As an example of the not dip into outer context case, consider as super
88
+ # constructor calls versus function calls. One grammar might look like
89
+ # this:</p>
90
+ #
91
+ # <pre>
92
+ # ctorBody
93
+ # : '{' superCall? stat* '}'
94
+ # ;
95
+ # </pre>
96
+ #
97
+ # <p>
98
+ # Or, you might see something like</p>
99
+ #
100
+ # <pre>
101
+ # stat
102
+ # : superCall ';'
103
+ # | expression ';'
104
+ # | ...
105
+ # ;
106
+ # </pre>
107
+ #
108
+ # <p>
109
+ # In both cases I believe that no closure operations will dip into the outer
110
+ # context. In the first case ctorBody in the worst case will stop at the '}'.
111
+ # In the 2nd case it should stop at the ';'. Both cases should stay within the
112
+ # entry rule and not dip into the outer context.</p>
113
+ #
114
+ # <p>
115
+ # <strong>PREDICATES</strong></p>
116
+ #
117
+ # <p>
118
+ # Predicates are always evaluated if present in either SLL or LL both. SLL and
119
+ # LL simulation deals with predicates differently. SLL collects predicates as
120
+ # it performs closure operations like ANTLR v3 did. It delays predicate
121
+ # evaluation until it reaches an accept state. This allows us to cache the SLL
122
+ # ATN simulation whereas, if we had evaluated predicates on-the-fly during
123
+ # closure, the DFA state configuration sets would be different and we couldn't
124
+ # build up a suitable DFA.</p>
125
+ #
126
+ # <p>
127
+ # When building a DFA accept state during ATN simulation, we evaluate any
128
+ # predicates and return the sole semantically valid alternative. If there is
129
+ # more than 1 alternative, we report an ambiguity. If there are 0 alternatives,
130
+ # we throw an exception. Alternatives without predicates act like they have
131
+ # true predicates. The simple way to think about it is to strip away all
132
+ # alternatives with false predicates and choose the minimum alternative that
133
+ # remains.</p>
134
+ #
135
+ # <p>
136
+ # When we start in the DFA and reach an accept state that's predicated, we test
137
+ # those and return the minimum semantically viable alternative. If no
138
+ # alternatives are viable, we throw an exception.</p>
139
+ #
140
+ # <p>
141
+ # During full LL ATN simulation, closure always evaluates predicates and
142
+ # on-the-fly. This is crucial to reducing the configuration set size during
143
+ # closure. It hits a landmine when parsing with the Java grammar, for example,
144
+ # without this on-the-fly evaluation.</p>
145
+ #
146
+ # <p>
147
+ # <strong>SHARING DFA</strong></p>
148
+ #
149
+ # <p>
150
+ # All instances of the same parser share the same decision DFAs through a
151
+ # static field. Each instance gets its own ATN simulator but they share the
152
+ # same {@link #decisionToDFA} field. They also share a
153
+ # {@link PredictionContextCache} object that makes sure that all
154
+ # {@link PredictionContext} objects are shared among the DFA states. This makes
155
+ # a big size difference.</p>
156
+ #
157
+ # <p>
158
+ # <strong>THREAD SAFETY</strong></p>
159
+ #
160
+ # <p>
161
+ # The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when
162
+ # it adds a new DFA object to that array. {@link #addDFAEdge}
163
+ # locks on the DFA for the current decision when setting the
164
+ # {@link DFAState#edges} field. {@link #addDFAState} locks on
165
+ # the DFA for the current decision when looking up a DFA state to see if it
166
+ # already exists. We must make sure that all requests to add DFA states that
167
+ # are equivalent result in the same shared DFA object. This is because lots of
168
+ # threads will be trying to update the DFA at once. The
169
+ # {@link #addDFAState} method also locks inside the DFA lock
170
+ # but this time on the shared context cache when it rebuilds the
171
+ # configurations' {@link PredictionContext} objects using cached
172
+ # subgraphs/nodes. No other locking occurs, even during DFA simulation. This is
173
+ # safe as long as we can guarantee that all threads referencing
174
+ # {@code s.edge[t]} get the same physical target {@link DFAState}, or
175
+ # {@code null}. Once into the DFA, the DFA simulation does not reference the
176
+ # {@link DFA#states} map. It follows the {@link DFAState#edges} field to new
177
+ # targets. The DFA simulator will either find {@link DFAState#edges} to be
178
+ # {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or
179
+ # {@code dfa.edges[t]} to be non-null. The
180
+ # {@link #addDFAEdge} method could be racing to set the field
181
+ # but in either case the DFA simulator works; if {@code null}, and requests ATN
182
+ # simulation. It could also race trying to get {@code dfa.edges[t]}, but either
183
+ # way it will work because it's not doing a test and set operation.</p>
184
+ #
185
+ # <p>
186
+ # <strong>Starting with SLL then failing to combined SLL/LL (Two-Stage
187
+ # Parsing)</strong></p>
188
+ #
189
+ # <p>
190
+ # Sam pointed out that if SLL does not give a syntax error, then there is no
191
+ # point in doing full LL, which is slower. We only have to try LL if we get a
192
+ # syntax error. For maximum speed, Sam starts the parser set to pure SLL
193
+ # mode with the {@link BailErrorStrategy}:</p>
194
+ #
195
+ # <pre>
196
+ # parser.{@link Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
197
+ # parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
198
+ # </pre>
199
+ #
200
+ # <p>
201
+ # If it does not get a syntax error, then we're done. If it does get a syntax
202
+ # error, we need to retry with the combined SLL/LL strategy.</p>
203
+ #
204
+ # <p>
205
+ # The reason this works is as follows. If there are no SLL conflicts, then the
206
+ # grammar is SLL (at least for that input set). If there is an SLL conflict,
207
+ # the full LL analysis must yield a set of viable alternatives which is a
208
+ # subset of the alternatives reported by SLL. If the LL set is a singleton,
209
+ # then the grammar is LL but not SLL. If the LL set is the same size as the SLL
210
+ # set, the decision is SLL. If the LL set has size &gt; 1, then that decision
211
+ # is truly ambiguous on the current input. If the LL set is smaller, then the
212
+ # SLL conflict resolution might choose an alternative that the full LL would
213
+ # rule out as a possibility based upon better context information. If that's
214
+ # the case, then the SLL parse will definitely get an error because the full LL
215
+ # analysis says it's not viable. If SLL conflict resolution chooses an
216
+ # alternative within the LL set, then both SLL and LL would choose the same
217
+ # alternative because they both choose the minimum of multiple conflicting
218
+ # alternatives.</p>
219
+ #
220
+ # <p>
221
+ # Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and
222
+ # a smaller LL set called <em>s</em>. If <em>s</em> is {@code {2, 3}}, then SLL
223
+ # parsing will get an error because SLL will pursue alternative 1. If
224
+ # <em>s</em> is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will
225
+ # choose the same alternative because alternative one is the minimum of either
226
+ # set. If <em>s</em> is {@code {2}} or {@code {3}} then SLL will get a syntax
227
+ # error. If <em>s</em> is {@code {1}} then SLL will succeed.</p>
228
+ #
229
+ # <p>
230
+ # Of course, if the input is invalid, then we will get an error for sure in
231
+ # both SLL and LL parsing. Erroneous input will therefore require 2 passes over
232
+ # the input.</p>
233
+ #
234
+ import sys
235
+ from antlr4 import DFA
236
+ from antlr4.PredictionContext import PredictionContextCache, PredictionContext, SingletonPredictionContext, \
237
+ PredictionContextFromRuleContext
238
+ from antlr4.BufferedTokenStream import TokenStream
239
+ from antlr4.Parser import Parser
240
+ from antlr4.ParserRuleContext import ParserRuleContext
241
+ from antlr4.RuleContext import RuleContext
242
+ from antlr4.Token import Token
243
+ from antlr4.Utils import str_list
244
+ from antlr4.atn.ATN import ATN
245
+ from antlr4.atn.ATNConfig import ATNConfig
246
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
247
+ from antlr4.atn.ATNSimulator import ATNSimulator
248
+ from antlr4.atn.ATNState import StarLoopEntryState, DecisionState, RuleStopState, ATNState
249
+ from antlr4.atn.PredictionMode import PredictionMode
250
+ from antlr4.atn.SemanticContext import SemanticContext, AND, andContext, orContext
251
+ from antlr4.atn.Transition import Transition, RuleTransition, ActionTransition, PrecedencePredicateTransition, \
252
+ PredicateTransition, AtomTransition, SetTransition, NotSetTransition
253
+ from antlr4.dfa.DFAState import DFAState, PredPrediction
254
+ from antlr4.error.Errors import NoViableAltException
255
+
256
+
257
+ class ParserATNSimulator(ATNSimulator):
258
+ __slots__ = (
259
+ 'parser', 'decisionToDFA', 'predictionMode', '_input', '_startIndex',
260
+ '_outerContext', '_dfa', 'mergeCache'
261
+ )
262
+
263
+ debug = False
264
+ debug_list_atn_decisions = False
265
+ dfa_debug = False
266
+ retry_debug = False
267
+
268
+
269
+ def __init__(self, parser:Parser, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
270
+ super().__init__(atn, sharedContextCache)
271
+ self.parser = parser
272
+ self.decisionToDFA = decisionToDFA
273
+ # SLL, LL, or LL + exact ambig detection?#
274
+ self.predictionMode = PredictionMode.LL
275
+ # LAME globals to avoid parameters!!!!! I need these down deep in predTransition
276
+ self._input = None
277
+ self._startIndex = 0
278
+ self._outerContext = None
279
+ self._dfa = None
280
+ # Each prediction operation uses a cache for merge of prediction contexts.
281
+ # Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
282
+ # isn't synchronized but we're ok since two threads shouldn't reuse same
283
+ # parser/atnsim object because it can only handle one input at a time.
284
+ # This maps graphs a and b to merged result c. (a,b)&rarr;c. We can avoid
285
+ # the merge if we ever see a and b again. Note that (b,a)&rarr;c should
286
+ # also be examined during cache lookup.
287
+ #
288
+ self.mergeCache = None
289
+
290
+
291
+ def reset(self):
292
+ pass
293
+
294
    def adaptivePredict(self, input:TokenStream, decision:int, outerContext:ParserRuleContext):
        """Predict which alternative of the given decision matches the upcoming input.

        Looks up (or lazily builds) the start state of the decision's DFA, then
        runs SLL simulation via execATN (which may itself fail over to full
        LL).  The input stream's position and mark are restored in the finally
        block, and the per-prediction merge cache is discarded there too.

        Returns the predicted (1-based) alternative number.
        """
        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
            print("adaptivePredict decision " + str(decision) +
                  " exec LA(1)==" + self.getLookaheadName(input) +
                  " line " + str(input.LT(1).line) + ":" +
                  str(input.LT(1).column))
        self._input = input
        self._startIndex = input.index
        self._outerContext = outerContext

        dfa = self.decisionToDFA[decision]
        self._dfa = dfa
        # Mark the stream so the position can be restored after prediction.
        m = input.mark()
        index = input.index

        # Now we are certain to have a specific decision's DFA
        # But, do we still need an initial state?
        try:
            if dfa.precedenceDfa:
                # the start state for a precedence DFA depends on the current
                # parser precedence, and is provided by a DFA method.
                s0 = dfa.getPrecedenceStartState(self.parser.getPrecedence())
            else:
                # the start state for a "regular" DFA is just s0
                s0 = dfa.s0

            if s0 is None:
                # No cached start state yet; compute it from the ATN.
                if outerContext is None:
                    outerContext = ParserRuleContext.EMPTY
                if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
                    print("predictATN decision " + str(dfa.decision) +
                          " exec LA(1)==" + self.getLookaheadName(input) +
                          ", outerContext=" + outerContext.toString(self.parser.literalNames, None))

                fullCtx = False
                s0_closure = self.computeStartState(dfa.atnStartState, ParserRuleContext.EMPTY, fullCtx)

                if dfa.precedenceDfa:
                    # If this is a precedence DFA, we use applyPrecedenceFilter
                    # to convert the computed start state to a precedence start
                    # state. We then use DFA.setPrecedenceStartState to set the
                    # appropriate start state for the precedence level rather
                    # than simply setting DFA.s0.
                    #
                    dfa.s0.configs = s0_closure # not used for prediction but useful to know start configs anyway
                    s0_closure = self.applyPrecedenceFilter(s0_closure)
                    s0 = self.addDFAState(dfa, DFAState(configs=s0_closure))
                    dfa.setPrecedenceStartState(self.parser.getPrecedence(), s0)
                else:
                    s0 = self.addDFAState(dfa, DFAState(configs=s0_closure))
                    dfa.s0 = s0

            alt = self.execATN(dfa, s0, input, index, outerContext)
            if ParserATNSimulator.debug:
                print("DFA after predictATN: " + dfa.toString(self.parser.literalNames))
            return alt
        finally:
            self._dfa = None
            self.mergeCache = None # wack cache after each prediction
            # Restore the stream to where prediction started.
            input.seek(index)
            input.release(m)
355
+
356
    # Performs ATN simulation to compute a predicted alternative based
    # upon the remaining input, but also updates the DFA cache to avoid
    # having to traverse the ATN again for the same input sequence.

    # There are some key conditions we're looking for after computing a new
    # set of ATN configs (proposed DFA state):
    #       if the set is empty, there is no viable alternative for current symbol
    #       does the state uniquely predict an alternative?
    #       does the state have a conflict that would prevent us from
    #         putting it on the work list?

    # We also have some key operations to do:
    #       add an edge from previous DFA state to potentially new DFA state, D,
    #         upon current symbol but only if adding to work list, which means in all
    #         cases except no viable alternative (and possibly non-greedy decisions?)
    #       collecting predicates and adding semantic context to DFA accept states
    #       adding rule context to context-sensitive DFA accept states
    #       consuming an input symbol
    #       reporting a conflict
    #       reporting an ambiguity
    #       reporting a context sensitivity
    #       reporting insufficient predicates

    # cover these cases:
    #    dead end
    #    single alt
    #    single alt + preds
    #    conflict
    #    conflict + preds
    #
    def execATN(self, dfa:DFA, s0:DFAState, input:TokenStream, startIndex:int, outerContext:ParserRuleContext ):
        """Run SLL(*) simulation from DFA state s0, extending the DFA cache
        as new states are discovered, and return the predicted alternative.
        Fails over to execATNWithFullContext when an SLL conflict requires
        full-context resolution."""
        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
            print("execATN decision " + str(dfa.decision) +
                  " exec LA(1)==" + self.getLookaheadName(input) +
                  " line " + str(input.LT(1).line) + ":" + str(input.LT(1).column))

        previousD = s0

        if ParserATNSimulator.debug:
            print("s0 = " + str(s0))

        t = input.LA(1)

        while True: # while more work
            # Prefer the cached edge; compute (and cache) it only on a miss.
            D = self.getExistingTargetState(previousD, t)
            if D is None:
                D = self.computeTargetState(dfa, previousD, t)
            if D is self.ERROR:
                # if any configs in previous dipped into outer context, that
                # means that input up to t actually finished entry rule
                # at least for SLL decision. Full LL doesn't dip into outer
                # so don't need special case.
                # We will get an error no matter what so delay until after
                # decision; better error message. Also, no reachable target
                # ATN states in SLL implies LL will also get nowhere.
                # If conflict in states that dip out, choose min since we
                # will get error no matter what.
                e = self.noViableAlt(input, outerContext, previousD.configs, startIndex)
                input.seek(startIndex)
                alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
                if alt!=ATN.INVALID_ALT_NUMBER:
                    return alt
                raise e

            if D.requiresFullContext and self.predictionMode != PredictionMode.SLL:
                # IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
                conflictingAlts = D.configs.conflictingAlts
                if D.predicates is not None:
                    if ParserATNSimulator.debug:
                        print("DFA state has preds in DFA sim LL failover")
                    conflictIndex = input.index
                    if conflictIndex != startIndex:
                        input.seek(startIndex)

                    conflictingAlts = self.evalSemanticContext(D.predicates, outerContext, True)
                    if len(conflictingAlts)==1:
                        if ParserATNSimulator.debug:
                            print("Full LL avoided")
                        return min(conflictingAlts)

                    if conflictIndex != startIndex:
                        # restore the index so reporting the fallback to full
                        # context occurs with the index at the correct spot
                        input.seek(conflictIndex)

                if ParserATNSimulator.dfa_debug:
                    print("ctx sensitive state " + str(outerContext) +" in " + str(D))
                fullCtx = True
                s0_closure = self.computeStartState(dfa.atnStartState, outerContext, fullCtx)
                self.reportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.index)
                alt = self.execATNWithFullContext(dfa, D, s0_closure, input, startIndex, outerContext)
                return alt

            if D.isAcceptState:
                if D.predicates is None:
                    return D.prediction

                stopIndex = input.index
                # Predicates evaluate at the start of the decision.
                input.seek(startIndex)
                alts = self.evalSemanticContext(D.predicates, outerContext, True)
                if len(alts)==0:
                    raise self.noViableAlt(input, outerContext, D.configs, startIndex)
                elif len(alts)==1:
                    return min(alts)
                else:
                    # report ambiguity after predicate evaluation to make sure the correct
                    # set of ambig alts is reported.
                    self.reportAmbiguity(dfa, D, startIndex, stopIndex, False, alts, D.configs)
                    return min(alts)

            previousD = D

            if t != Token.EOF:
                input.consume()
                t = input.LA(1)
471
+
472
+ #
473
+ # Get an existing target state for an edge in the DFA. If the target state
474
+ # for the edge has not yet been computed or is otherwise not available,
475
+ # this method returns {@code null}.
476
+ #
477
+ # @param previousD The current DFA state
478
+ # @param t The next input symbol
479
+ # @return The existing target DFA state for the given input symbol
480
+ # {@code t}, or {@code null} if the target state for this edge is not
481
+ # already cached
482
+ #
483
+ def getExistingTargetState(self, previousD:DFAState, t:int):
484
+ edges = previousD.edges
485
+ if edges is None or t + 1 < 0 or t + 1 >= len(edges):
486
+ return None
487
+ else:
488
+ return edges[t + 1]
489
+
490
    def computeTargetState(self, dfa:DFA, previousD:DFAState, t:int):
        """Compute the target DFA state for the edge leaving ``previousD`` on
        input symbol ``t``, add the state and edge to the DFA, and return it.

        Returns the :data:`ERROR` sentinel state when ``t`` leads to no valid
        configuration set.
        """
        reach = self.computeReachSet(previousD.configs, t, False)
        if reach is None:
            # Dead end: cache an ERROR edge so we fail fast next time.
            self.addDFAEdge(dfa, previousD, t, self.ERROR)
            return self.ERROR

        # create new target state; we'll add to DFA after it's complete
        D = DFAState(configs=reach)

        predictedAlt = self.getUniqueAlt(reach)

        if ParserATNSimulator.debug:
            altSubSets = PredictionMode.getConflictingAltSubsets(reach)
            print("SLL altSubSets=" + str(altSubSets) + ", configs=" + str(reach) +
                  ", predict=" + str(predictedAlt) + ", allSubsetsConflict=" +
                  str(PredictionMode.allSubsetsConflict(altSubSets)) + ", conflictingAlts=" +
                  str(self.getConflictingAlts(reach)))

        if predictedAlt!=ATN.INVALID_ALT_NUMBER:
            # NO CONFLICT, UNIQUELY PREDICTED ALT
            D.isAcceptState = True
            D.configs.uniqueAlt = predictedAlt
            D.prediction = predictedAlt
        elif PredictionMode.hasSLLConflictTerminatingPrediction(self.predictionMode, reach):
            # MORE THAN ONE VIABLE ALTERNATIVE
            D.configs.conflictingAlts = self.getConflictingAlts(reach)
            D.requiresFullContext = True
            # in SLL-only mode, we will stop at this state and return the minimum alt
            D.isAcceptState = True
            D.prediction = min(D.configs.conflictingAlts)

        if D.isAcceptState and D.configs.hasSemanticContext:
            # Attach predicate/alt pairs so prediction evaluates them at runtime.
            self.predicateDFAState(D, self.atn.getDecisionState(dfa.decision))
            if D.predicates is not None:
                D.prediction = ATN.INVALID_ALT_NUMBER

        # all adds to dfa are done after we've created full D state
        D = self.addDFAEdge(dfa, previousD, t, D)
        return D
541
+
542
+ def predicateDFAState(self, dfaState:DFAState, decisionState:DecisionState):
543
+ # We need to test all predicates, even in DFA states that
544
+ # uniquely predict alternative.
545
+ nalts = len(decisionState.transitions)
546
+ # Update DFA so reach becomes accept state with (predicate,alt)
547
+ # pairs if preds found for conflicting alts
548
+ altsToCollectPredsFrom = self.getConflictingAltsOrUniqueAlt(dfaState.configs)
549
+ altToPred = self.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
550
+ if altToPred is not None:
551
+ dfaState.predicates = self.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
552
+ dfaState.prediction = ATN.INVALID_ALT_NUMBER # make sure we use preds
553
+ else:
554
+ # There are preds in configs but they might go away
555
+ # when OR'd together like {p}? || NONE == NONE. If neither
556
+ # alt has preds, resolve to min alt
557
+ dfaState.prediction = min(altsToCollectPredsFrom)
558
+
559
    # comes back with reach.uniqueAlt set to a valid alt
    def execATNWithFullContext(self, dfa:DFA, D:DFAState, # how far we got before failing over
                               s0:ATNConfigSet,
                               input:TokenStream,
                               startIndex:int,
                               outerContext:ParserRuleContext):
        """Run full-context LL prediction starting from configuration set
        ``s0`` after an SLL conflict.  Returns the predicted alternative;
        reports context sensitivity or ambiguity as appropriate."""
        if ParserATNSimulator.debug or ParserATNSimulator.debug_list_atn_decisions:
            print("execATNWithFullContext", str(s0))
        fullCtx = True
        foundExactAmbig = False
        reach = None
        previous = s0
        # Restart scanning from the decision point.
        input.seek(startIndex)
        t = input.LA(1)
        predictedAlt = -1
        while (True): # while more work
            reach = self.computeReachSet(previous, t, fullCtx)
            if reach is None:
                # if any configs in previous dipped into outer context, that
                # means that input up to t actually finished entry rule
                # at least for LL decision. Full LL doesn't dip into outer
                # so don't need special case.
                # We will get an error no matter what so delay until after
                # decision; better error message. Also, no reachable target
                # ATN states in SLL implies LL will also get nowhere.
                # If conflict in states that dip out, choose min since we
                # will get error no matter what.
                e = self.noViableAlt(input, outerContext, previous, startIndex)
                input.seek(startIndex)
                alt = self.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
                if alt!=ATN.INVALID_ALT_NUMBER:
                    return alt
                else:
                    raise e

            altSubSets = PredictionMode.getConflictingAltSubsets(reach)
            if ParserATNSimulator.debug:
                print("LL altSubSets=" + str(altSubSets) + ", predict=" +
                      str(PredictionMode.getUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
                      str(PredictionMode.resolvesToJustOneViableAlt(altSubSets)))

            reach.uniqueAlt = self.getUniqueAlt(reach)
            # unique prediction?
            if reach.uniqueAlt!=ATN.INVALID_ALT_NUMBER:
                predictedAlt = reach.uniqueAlt
                break
            elif self.predictionMode is not PredictionMode.LL_EXACT_AMBIG_DETECTION:
                predictedAlt = PredictionMode.resolvesToJustOneViableAlt(altSubSets)
                if predictedAlt != ATN.INVALID_ALT_NUMBER:
                    break
            else:
                # In exact ambiguity mode, we never try to terminate early.
                # Just keeps scarfing until we know what the conflict is
                if PredictionMode.allSubsetsConflict(altSubSets) and PredictionMode.allSubsetsEqual(altSubSets):
                    foundExactAmbig = True
                    predictedAlt = PredictionMode.getSingleViableAlt(altSubSets)
                    break
                # else there are multiple non-conflicting subsets or
                # we're not sure what the ambiguity is yet.
                # So, keep going.

            previous = reach
            if t != Token.EOF:
                input.consume()
                t = input.LA(1)

        # If the configuration set uniquely predicts an alternative,
        # without conflict, then we know that it's a full LL decision
        # not SLL.
        if reach.uniqueAlt != ATN.INVALID_ALT_NUMBER :
            self.reportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.index)
            return predictedAlt

        # We do not check predicates here because we have checked them
        # on-the-fly when doing full context prediction.

        #
        # In non-exact ambiguity detection mode, we might actually be able to
        # detect an exact ambiguity, but I'm not going to spend the cycles
        # needed to check. We only emit ambiguity warnings in exact ambiguity
        # mode.
        #
        # For example, we might know that we have conflicting configurations.
        # But, that does not mean that there is no way forward without a
        # conflict. It's possible to have nonconflicting alt subsets as in:

        # altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]

        # from
        #
        #    [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
        #     (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
        #
        # In this case, (17,1,[5 $]) indicates there is some next sequence that
        # would resolve this without conflict to alternative 1. Any other viable
        # next sequence, however, is associated with a conflict.  We stop
        # looking for input because no amount of further lookahead will alter
        # the fact that we should predict alternative 1.  We just can't say for
        # sure that there is an ambiguity without looking further.

        self.reportAmbiguity(dfa, D, startIndex, input.index, foundExactAmbig, None, reach)

        return predictedAlt
662
+
663
    def computeReachSet(self, closure:ATNConfigSet, t:int, fullCtx:bool):
        """Compute the set of ATN configurations reachable from ``closure``
        on input symbol ``t``, including the epsilon closure of the result.

        Returns None when nothing is reachable (a dead end).
        """
        if ParserATNSimulator.debug:
            print("in computeReachSet, starting closure: " + str(closure))

        # Lazily (re)create the per-prediction graph merge cache.
        if self.mergeCache is None:
            self.mergeCache = dict()

        intermediate = ATNConfigSet(fullCtx)

        # Configurations already in a rule stop state indicate reaching the end
        # of the decision rule (local context) or end of the start rule (full
        # context). Once reached, these configurations are never updated by a
        # closure operation, so they are handled separately for the performance
        # advantage of having a smaller intermediate set when calling closure.
        #
        # For full-context reach operations, separate handling is required to
        # ensure that the alternative matching the longest overall sequence is
        # chosen when multiple such configurations can match the input.

        skippedStopStates = None

        # First figure out where we can reach on input t
        for c in closure:
            if ParserATNSimulator.debug:
                print("testing " + self.getTokenName(t) + " at " + str(c))

            if isinstance(c.state, RuleStopState):
                if fullCtx or t == Token.EOF:
                    if skippedStopStates is None:
                        skippedStopStates = list()
                    skippedStopStates.append(c)
                continue

            for trans in c.state.transitions:
                target = self.getReachableTarget(trans, t)
                if target is not None:
                    intermediate.add(ATNConfig(state=target, config=c), self.mergeCache)

        # Now figure out where the reach operation can take us...

        reach = None

        # This block optimizes the reach operation for intermediate sets which
        # trivially indicate a termination state for the overall
        # adaptivePredict operation.
        #
        # The conditions assume that intermediate
        # contains all configurations relevant to the reach set, but this
        # condition is not true when one or more configurations have been
        # withheld in skippedStopStates, or when the current symbol is EOF.
        #
        if skippedStopStates is None and t!=Token.EOF:
            if len(intermediate)==1:
                # Don't pursue the closure if there is just one state.
                # It can only have one alternative; just add to result
                # Also don't pursue the closure if there is unique alternative
                # among the configurations.
                reach = intermediate
            elif self.getUniqueAlt(intermediate)!=ATN.INVALID_ALT_NUMBER:
                # Also don't pursue the closure if there is unique alternative
                # among the configurations.
                reach = intermediate

        # If the reach set could not be trivially determined, perform a closure
        # operation on the intermediate set to compute its initial value.
        #
        if reach is None:
            reach = ATNConfigSet(fullCtx)
            closureBusy = set()
            treatEofAsEpsilon = t == Token.EOF
            for c in intermediate:
                self.closure(c, reach, closureBusy, False, fullCtx, treatEofAsEpsilon)

        if t == Token.EOF:
            # After consuming EOF no additional input is possible, so we are
            # only interested in configurations which reached the end of the
            # decision rule (local context) or end of the start rule (full
            # context). Update reach to contain only these configurations. This
            # handles both explicit EOF transitions in the grammar and implicit
            # EOF transitions following the end of the decision or start rule.
            #
            # When reach==intermediate, no closure operation was performed. In
            # this case, removeAllConfigsNotInRuleStopState needs to check for
            # reachable rule stop states as well as configurations already in
            # a rule stop state.
            #
            # This is handled before the configurations in skippedStopStates,
            # because any configurations potentially added from that list are
            # already guaranteed to meet this condition whether or not it's
            # required.
            #
            reach = self.removeAllConfigsNotInRuleStopState(reach, reach is intermediate)

        # If skippedStopStates is not null, then it contains at least one
        # configuration. For full-context reach operations, these
        # configurations reached the end of the start rule, in which case we
        # only add them back to reach if no configuration during the current
        # closure operation reached such a state. This ensures adaptivePredict
        # chooses an alternative matching the longest overall sequence when
        # multiple alternatives are viable.
        #
        if skippedStopStates is not None and ( (not fullCtx) or (not PredictionMode.hasConfigInRuleStopState(reach))):
            for c in skippedStopStates:
                reach.add(c, self.mergeCache)
        if len(reach)==0:
            return None
        else:
            return reach
771
+
772
+ #
773
+ # Return a configuration set containing only the configurations from
774
+ # {@code configs} which are in a {@link RuleStopState}. If all
775
+ # configurations in {@code configs} are already in a rule stop state, this
776
+ # method simply returns {@code configs}.
777
+ #
778
+ # <p>When {@code lookToEndOfRule} is true, this method uses
779
+ # {@link ATN#nextTokens} for each configuration in {@code configs} which is
780
+ # not already in a rule stop state to see if a rule stop state is reachable
781
+ # from the configuration via epsilon-only transitions.</p>
782
+ #
783
+ # @param configs the configuration set to update
784
+ # @param lookToEndOfRule when true, this method checks for rule stop states
785
+ # reachable by epsilon-only transitions from each configuration in
786
+ # {@code configs}.
787
+ #
788
+ # @return {@code configs} if all configurations in {@code configs} are in a
789
+ # rule stop state, otherwise return a new configuration set containing only
790
+ # the configurations from {@code configs} which are in a rule stop state
791
+ #
792
+ def removeAllConfigsNotInRuleStopState(self, configs:ATNConfigSet, lookToEndOfRule:bool):
793
+ if PredictionMode.allConfigsInRuleStopStates(configs):
794
+ return configs
795
+ result = ATNConfigSet(configs.fullCtx)
796
+ for config in configs:
797
+ if isinstance(config.state, RuleStopState):
798
+ result.add(config, self.mergeCache)
799
+ continue
800
+ if lookToEndOfRule and config.state.epsilonOnlyTransitions:
801
+ nextTokens = self.atn.nextTokens(config.state)
802
+ if Token.EPSILON in nextTokens:
803
+ endOfRuleState = self.atn.ruleToStopState[config.state.ruleIndex]
804
+ result.add(ATNConfig(state=endOfRuleState, config=config), self.mergeCache)
805
+ return result
806
+
807
+ def computeStartState(self, p:ATNState, ctx:RuleContext, fullCtx:bool):
808
+ # always at least the implicit call to start rule
809
+ initialContext = PredictionContextFromRuleContext(self.atn, ctx)
810
+ configs = ATNConfigSet(fullCtx)
811
+
812
+ for i in range(0, len(p.transitions)):
813
+ target = p.transitions[i].target
814
+ c = ATNConfig(target, i+1, initialContext)
815
+ closureBusy = set()
816
+ self.closure(c, configs, closureBusy, True, fullCtx, False)
817
+ return configs
818
+
819
    def applyPrecedenceFilter(self, configs:ATNConfigSet):
        """Transform the start state computed by computeStartState into the
        special start state used by a precedence DFA for the current
        precedence level.

        Two passes over ``configs``:

        1. Evaluate the precedence predicates of every alt-1 configuration
           with SemanticContext.evalPrecedence, dropping those that are
           eliminated, and remember the prediction context seen for each alt-1
           ATN state.
        2. Remove every configuration predicting an alternative greater than 1
           whose (state, context) pair was already reachable via alternative 1,
           unless its precedenceFilterSuppressed flag is set.

        This is valid because the closure block of a transformed left-recursive
        rule cannot contain epsilon transitions bypassing its body, and the
        primary portion cannot contain an epsilon transition, so any alt>1
        configuration sharing a state *and context* with an alt-1 configuration
        comes from a nested, lower-precedence invocation.  The prediction
        context comparison is what keeps configurations that legitimately
        stepped out to an outer rule and back in (e.g. ``statement`` reached
        through ``prog`` in a grammar like
        ``statement: letterA | statement letterA 'b';``) from being eliminated.

        :param configs: the configuration set computed by computeStartState as
            the start state for the DFA.
        :return: the transformed configuration set representing the start state
            for a precedence DFA at the level given by ``Parser.getPrecedence``.
        """
        statesFromAlt1 = dict()
        configSet = ATNConfigSet(configs.fullCtx)
        for config in configs:
            # handle alt 1 first
            if config.alt != 1:
                continue
            updatedContext = config.semanticContext.evalPrecedence(self.parser, self._outerContext)
            if updatedContext is None:
                # the configuration was eliminated
                continue

            statesFromAlt1[config.state.stateNumber] = config.context
            if updatedContext is not config.semanticContext:
                configSet.add(ATNConfig(config=config, semantic=updatedContext), self.mergeCache)
            else:
                configSet.add(config, self.mergeCache)

        for config in configs:
            if config.alt == 1:
                # already handled
                continue

            # In the future, this elimination step could be updated to also
            # filter the prediction context for alternatives predicting alt>1
            # (basically a graph subtraction algorithm).
            #
            if not config.precedenceFilterSuppressed:
                context = statesFromAlt1.get(config.state.stateNumber, None)
                if context==config.context:
                    # eliminated
                    continue

            configSet.add(config, self.mergeCache)

        return configSet
911
+
912
+ def getReachableTarget(self, trans:Transition, ttype:int):
913
+ if trans.matches(ttype, 0, self.atn.maxTokenType):
914
+ return trans.target
915
+ else:
916
+ return None
917
+
918
    def getPredsForAmbigAlts(self, ambigAlts:set, configs:ATNConfigSet, nalts:int):
        """Collect, per ambiguous alternative, the OR of all semantic
        contexts attached to that alternative's configurations.

        Returns a list indexed by alt number (index 0 unused), or None when no
        alternative carries a real (non-NONE) predicate.
        """
        # REACH=[1|1|[]|0:0, 1|2|[]|0:1]
        # altToPred starts as an array of all null contexts. The entry at index i
        # corresponds to alternative i. altToPred[i] may have one of three values:
        #   1. null: no ATNConfig c is found such that c.alt==i
        #   2. SemanticContext.NONE: At least one ATNConfig c exists such that
        #      c.alt==i and c.semanticContext==SemanticContext.NONE. In other words,
        #      alt i has at least one unpredicated config.
        #   3. Non-NONE Semantic Context: There exists at least one, and for all
        #      ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE.
        #
        # From this, it is clear that NONE||anything==NONE.
        #
        altToPred = [None] * (nalts + 1)
        for c in configs:
            if c.alt in ambigAlts:
                altToPred[c.alt] = orContext(altToPred[c.alt], c.semanticContext)

        nPredAlts = 0
        for i in range(1, nalts+1):
            if altToPred[i] is None:
                altToPred[i] = SemanticContext.NONE
            elif altToPred[i] is not SemanticContext.NONE:
                nPredAlts += 1

        # nonambig alts are null in altToPred
        if nPredAlts==0:
            altToPred = None
        if ParserATNSimulator.debug:
            print("getPredsForAmbigAlts result " + str_list(altToPred))
        return altToPred
949
+
950
+ def getPredicatePredictions(self, ambigAlts:set, altToPred:list):
951
+ pairs = []
952
+ containsPredicate = False
953
+ for i in range(1, len(altToPred)):
954
+ pred = altToPred[i]
955
+ # unpredicated is indicated by SemanticContext.NONE
956
+ if ambigAlts is not None and i in ambigAlts:
957
+ pairs.append(PredPrediction(pred, i))
958
+ if pred is not SemanticContext.NONE:
959
+ containsPredicate = True
960
+
961
+ if not containsPredicate:
962
+ return None
963
+
964
+ return pairs
965
+
966
+ #
967
+ # This method is used to improve the localization of error messages by
968
+ # choosing an alternative rather than throwing a
969
+ # {@link NoViableAltException} in particular prediction scenarios where the
970
+ # {@link #ERROR} state was reached during ATN simulation.
971
+ #
972
+ # <p>
973
+ # The default implementation of this method uses the following
974
+ # algorithm to identify an ATN configuration which successfully parsed the
975
+ # decision entry rule. Choosing such an alternative ensures that the
976
+ # {@link ParserRuleContext} returned by the calling rule will be complete
977
+ # and valid, and the syntax error will be reported later at a more
978
+ # localized location.</p>
979
+ #
980
+ # <ul>
981
+ # <li>If a syntactically valid path or paths reach the end of the decision rule and
982
+ # they are semantically valid if predicated, return the min associated alt.</li>
983
+ # <li>Else, if a semantically invalid but syntactically valid path exist
984
+ # or paths exist, return the minimum associated alt.
985
+ # </li>
986
+ # <li>Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.</li>
987
+ # </ul>
988
+ #
989
+ # <p>
990
+ # In some scenarios, the algorithm described above could predict an
991
+ # alternative which will result in a {@link FailedPredicateException} in
992
+ # the parser. Specifically, this could occur if the <em>only</em> configuration
993
+ # capable of successfully parsing to the end of the decision rule is
994
+ # blocked by a semantic predicate. By choosing this alternative within
995
+ # {@link #adaptivePredict} instead of throwing a
996
+ # {@link NoViableAltException}, the resulting
997
+ # {@link FailedPredicateException} in the parser will identify the specific
998
+ # predicate which is preventing the parser from successfully parsing the
999
+ # decision rule, which helps developers identify and correct logic errors
1000
+ # in semantic predicates.
1001
+ # </p>
1002
+ #
1003
+ # @param configs The ATN configurations which were valid immediately before
1004
+ # the {@link #ERROR} state was reached
1005
+ # @param outerContext The is the \gamma_0 initial parser context from the paper
1006
+ # or the parser stack at the instant before prediction commences.
1007
+ #
1008
+ # @return The value to return from {@link #adaptivePredict}, or
1009
+ # {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not
1010
+ # identified and {@link #adaptivePredict} should report an error instead.
1011
+ #
1012
+ def getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet, outerContext:ParserRuleContext):
1013
+ semValidConfigs, semInvalidConfigs = self.splitAccordingToSemanticValidity(configs, outerContext)
1014
+ alt = self.getAltThatFinishedDecisionEntryRule(semValidConfigs)
1015
+ if alt!=ATN.INVALID_ALT_NUMBER: # semantically/syntactically viable path exists
1016
+ return alt
1017
+ # Is there a syntactically valid path with a failed pred?
1018
+ if len(semInvalidConfigs)>0:
1019
+ alt = self.getAltThatFinishedDecisionEntryRule(semInvalidConfigs)
1020
+ if alt!=ATN.INVALID_ALT_NUMBER: # syntactically viable path exists
1021
+ return alt
1022
+ return ATN.INVALID_ALT_NUMBER
1023
+
1024
+ def getAltThatFinishedDecisionEntryRule(self, configs:ATNConfigSet):
1025
+ alts = set()
1026
+ for c in configs:
1027
+ if c.reachesIntoOuterContext>0 or (isinstance(c.state, RuleStopState) and c.context.hasEmptyPath() ):
1028
+ alts.add(c.alt)
1029
+ if len(alts)==0:
1030
+ return ATN.INVALID_ALT_NUMBER
1031
+ else:
1032
+ return min(alts)
1033
+
1034
+ # Walk the list of configurations and split them according to
1035
+ # those that have preds evaluating to true/false. If no pred, assume
1036
+ # true pred and include in succeeded set. Returns Pair of sets.
1037
+ #
1038
+ # Create a new set so as not to alter the incoming parameter.
1039
+ #
1040
+ # Assumption: the input stream has been restored to the starting point
1041
+ # prediction, which is where predicates need to evaluate.
1042
+ #
1043
+ def splitAccordingToSemanticValidity(self, configs:ATNConfigSet, outerContext:ParserRuleContext):
1044
+ succeeded = ATNConfigSet(configs.fullCtx)
1045
+ failed = ATNConfigSet(configs.fullCtx)
1046
+ for c in configs:
1047
+ if c.semanticContext is not SemanticContext.NONE:
1048
+ predicateEvaluationResult = c.semanticContext.eval(self.parser, outerContext)
1049
+ if predicateEvaluationResult:
1050
+ succeeded.add(c)
1051
+ else:
1052
+ failed.add(c)
1053
+ else:
1054
+ succeeded.add(c)
1055
+ return (succeeded,failed)
1056
+
1057
+ # Look through a list of predicate/alt pairs, returning alts for the
1058
+ # pairs that win. A {@code NONE} predicate indicates an alt containing an
1059
+ # unpredicated config which behaves as "always true." If !complete
1060
+ # then we stop at the first predicate that evaluates to true. This
1061
+ # includes pairs with null predicates.
1062
+ #
1063
+ def evalSemanticContext(self, predPredictions:list, outerContext:ParserRuleContext, complete:bool):
1064
+ predictions = set()
1065
+ for pair in predPredictions:
1066
+ if pair.pred is SemanticContext.NONE:
1067
+ predictions.add(pair.alt)
1068
+ if not complete:
1069
+ break
1070
+ continue
1071
+ predicateEvaluationResult = pair.pred.eval(self.parser, outerContext)
1072
+ if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
1073
+ print("eval pred " + str(pair) + "=" + str(predicateEvaluationResult))
1074
+
1075
+ if predicateEvaluationResult:
1076
+ if ParserATNSimulator.debug or ParserATNSimulator.dfa_debug:
1077
+ print("PREDICT " + str(pair.alt))
1078
+ predictions.add(pair.alt)
1079
+ if not complete:
1080
+ break
1081
+ return predictions
1082
+
1083
+
1084
+ # TODO: If we are doing predicates, there is no point in pursuing
1085
+ # closure operations if we reach a DFA state that uniquely predicts
1086
+ # alternative. We will not be caching that DFA state and it is a
1087
+ # waste to pursue the closure. Might have to advance when we do
1088
+ # ambig detection thought :(
1089
+ #
1090
+
1091
+ def closure(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, treatEofAsEpsilon:bool):
1092
+ initialDepth = 0
1093
+ self.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
1094
+ fullCtx, initialDepth, treatEofAsEpsilon)
1095
+
1096
+
1097
+ def closureCheckingStopState(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
1098
+ if ParserATNSimulator.debug:
1099
+ print("closure(" + str(config) + ")")
1100
+
1101
+ if isinstance(config.state, RuleStopState):
1102
+ # We hit rule end. If we have context info, use it
1103
+ # run thru all possible stack tops in ctx
1104
+ if not config.context.isEmpty():
1105
+ for i in range(0, len(config.context)):
1106
+ state = config.context.getReturnState(i)
1107
+ if state is PredictionContext.EMPTY_RETURN_STATE:
1108
+ if fullCtx:
1109
+ configs.add(ATNConfig(state=config.state, context=PredictionContext.EMPTY, config=config), self.mergeCache)
1110
+ continue
1111
+ else:
1112
+ # we have no context info, just chase follow links (if greedy)
1113
+ if ParserATNSimulator.debug:
1114
+ print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
1115
+ self.closure_(config, configs, closureBusy, collectPredicates,
1116
+ fullCtx, depth, treatEofAsEpsilon)
1117
+ continue
1118
+ returnState = self.atn.states[state]
1119
+ newContext = config.context.getParent(i) # "pop" return state
1120
+ c = ATNConfig(state=returnState, alt=config.alt, context=newContext, semantic=config.semanticContext)
1121
+ # While we have context to pop back from, we may have
1122
+ # gotten that context AFTER having falling off a rule.
1123
+ # Make sure we track that we are now out of context.
1124
+ c.reachesIntoOuterContext = config.reachesIntoOuterContext
1125
+ self.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon)
1126
+ return
1127
+ elif fullCtx:
1128
+ # reached end of start rule
1129
+ configs.add(config, self.mergeCache)
1130
+ return
1131
+ else:
1132
+ # else if we have no context info, just chase follow links (if greedy)
1133
+ if ParserATNSimulator.debug:
1134
+ print("FALLING off rule " + self.getRuleName(config.state.ruleIndex))
1135
+
1136
+ self.closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon)
1137
+
1138
+ # Do the actual work of walking epsilon edges#
1139
+ def closure_(self, config:ATNConfig, configs:ATNConfigSet, closureBusy:set, collectPredicates:bool, fullCtx:bool, depth:int, treatEofAsEpsilon:bool):
1140
+ p = config.state
1141
+ # optimization
1142
+ if not p.epsilonOnlyTransitions:
1143
+ configs.add(config, self.mergeCache)
1144
+ # make sure to not return here, because EOF transitions can act as
1145
+ # both epsilon transitions and non-epsilon transitions.
1146
+
1147
+ first = True
1148
+ for t in p.transitions:
1149
+ if first:
1150
+ first = False
1151
+ if self.canDropLoopEntryEdgeInLeftRecursiveRule(config):
1152
+ continue
1153
+
1154
+ continueCollecting = collectPredicates and not isinstance(t, ActionTransition)
1155
+ c = self.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon)
1156
+ if c is not None:
1157
+ newDepth = depth
1158
+ if isinstance( config.state, RuleStopState):
1159
+ # target fell off end of rule; mark resulting c as having dipped into outer context
1160
+ # We can't get here if incoming config was rule stop and we had context
1161
+ # track how far we dip into outer context. Might
1162
+ # come in handy and we avoid evaluating context dependent
1163
+ # preds if this is > 0.
1164
+ if self._dfa is not None and self._dfa.precedenceDfa:
1165
+ if t.outermostPrecedenceReturn == self._dfa.atnStartState.ruleIndex:
1166
+ c.precedenceFilterSuppressed = True
1167
+ c.reachesIntoOuterContext += 1
1168
+ if c in closureBusy:
1169
+ # avoid infinite recursion for right-recursive rules
1170
+ continue
1171
+ closureBusy.add(c)
1172
+ configs.dipsIntoOuterContext = True # TODO: can remove? only care when we add to set per middle of this method
1173
+ newDepth -= 1
1174
+ if ParserATNSimulator.debug:
1175
+ print("dips into outer ctx: " + str(c))
1176
+ else:
1177
+ if not t.isEpsilon:
1178
+ if c in closureBusy:
1179
+ # avoid infinite recursion for EOF* and EOF+
1180
+ continue
1181
+ closureBusy.add(c)
1182
+ if isinstance(t, RuleTransition):
1183
+ # latch when newDepth goes negative - once we step out of the entry context we can't return
1184
+ if newDepth >= 0:
1185
+ newDepth += 1
1186
+
1187
+ self.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon)
1188
+
1189
+
1190
+
1191
+ # Implements first-edge (loop entry) elimination as an optimization
1192
+ # during closure operations. See antlr/antlr4#1398.
1193
+ #
1194
+ # The optimization is to avoid adding the loop entry config when
1195
+ # the exit path can only lead back to the same
1196
+ # StarLoopEntryState after popping context at the rule end state
1197
+ # (traversing only epsilon edges, so we're still in closure, in
1198
+ # this same rule).
1199
+ #
1200
+ # We need to detect any state that can reach loop entry on
1201
+ # epsilon w/o exiting rule. We don't have to look at FOLLOW
1202
+ # links, just ensure that all stack tops for config refer to key
1203
+ # states in LR rule.
1204
+ #
1205
+ # To verify we are in the right situation we must first check
1206
+ # closure is at a StarLoopEntryState generated during LR removal.
1207
+ # Then we check that each stack top of context is a return state
1208
+ # from one of these cases:
1209
+ #
1210
+ # 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
1211
+ # 2. expr op expr. The return state is the block end of internal block of (...)*
1212
+ # 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
1213
+ # That state points at block end of internal block of (...)*.
1214
+ # 4. expr '?' expr ':' expr. The return state points at block end,
1215
+ # which points at loop entry state.
1216
+ #
1217
+ # If any is true for each stack top, then closure does not add a
1218
+ # config to the current config set for edge[0], the loop entry branch.
1219
+ #
1220
+ # Conditions fail if any context for the current config is:
1221
+ #
1222
+ # a. empty (we'd fall out of expr to do a global FOLLOW which could
1223
+ # even be to some weird spot in expr) or,
1224
+ # b. lies outside of expr or,
1225
+ # c. lies within expr but at a state not the BlockEndState
1226
+ # generated during LR removal
1227
+ #
1228
+ # Do we need to evaluate predicates ever in closure for this case?
1229
+ #
1230
+ # No. Predicates, including precedence predicates, are only
1231
+ # evaluated when computing a DFA start state. I.e., only before
1232
+ # the lookahead (but not parser) consumes a token.
1233
+ #
1234
+ # There are no epsilon edges allowed in LR rule alt blocks or in
1235
+ # the "primary" part (ID here). If closure is in
1236
+ # StarLoopEntryState any lookahead operation will have consumed a
1237
+ # token as there are no epsilon-paths that lead to
1238
+ # StarLoopEntryState. We do not have to evaluate predicates
1239
+ # therefore if we are in the generated StarLoopEntryState of a LR
1240
+ # rule. Note that when making a prediction starting at that
1241
+ # decision point, decision d=2, compute-start-state performs
1242
+ # closure starting at edges[0], edges[1] emanating from
1243
+ # StarLoopEntryState. That means it is not performing closure on
1244
+ # StarLoopEntryState during compute-start-state.
1245
+ #
1246
+ # How do we know this always gives same prediction answer?
1247
+ #
1248
+ # Without predicates, loop entry and exit paths are ambiguous
1249
+ # upon remaining input +b (in, say, a+b). Either paths lead to
1250
+ # valid parses. Closure can lead to consuming + immediately or by
1251
+ # falling out of this call to expr back into expr and loop back
1252
+ # again to StarLoopEntryState to match +b. In this special case,
1253
+ # we choose the more efficient path, which is to take the bypass
1254
+ # path.
1255
+ #
1256
+ # The lookahead language has not changed because closure chooses
1257
+ # one path over the other. Both paths lead to consuming the same
1258
+ # remaining input during a lookahead operation. If the next token
1259
+ # is an operator, lookahead will enter the choice block with
1260
+ # operators. If it is not, lookahead will exit expr. Same as if
1261
+ # closure had chosen to enter the choice block immediately.
1262
+ #
1263
+ # Closure is examining one config (some loopentrystate, some alt,
1264
+ # context) which means it is considering exactly one alt. Closure
1265
+ # always copies the same alt to any derived configs.
1266
+ #
1267
+ # How do we know this optimization doesn't mess up precedence in
1268
+ # our parse trees?
1269
+ #
1270
+ # Looking through expr from left edge of stat only has to confirm
1271
+ # that an input, say, a+b+c; begins with any valid interpretation
1272
+ # of an expression. The precedence actually doesn't matter when
1273
+ # making a decision in stat seeing through expr. It is only when
1274
+ # parsing rule expr that we must use the precedence to get the
1275
+ # right interpretation and, hence, parse tree.
1276
+ #
1277
+ # @since 4.6
1278
+ #
1279
+ def canDropLoopEntryEdgeInLeftRecursiveRule(self, config):
1280
+ # return False
1281
+ p = config.state
1282
+ # First check to see if we are in StarLoopEntryState generated during
1283
+ # left-recursion elimination. For efficiency, also check if
1284
+ # the context has an empty stack case. If so, it would mean
1285
+ # global FOLLOW so we can't perform optimization
1286
+ # Are we the special loop entry/exit state? or SLL wildcard
1287
+ if p.stateType != ATNState.STAR_LOOP_ENTRY \
1288
+ or not p.isPrecedenceDecision \
1289
+ or config.context.isEmpty() \
1290
+ or config.context.hasEmptyPath():
1291
+ return False
1292
+
1293
+ # Require all return states to return back to the same rule
1294
+ # that p is in.
1295
+ numCtxs = len(config.context)
1296
+ for i in range(0, numCtxs): # for each stack context
1297
+ returnState = self.atn.states[config.context.getReturnState(i)]
1298
+ if returnState.ruleIndex != p.ruleIndex:
1299
+ return False
1300
+
1301
+ decisionStartState = p.transitions[0].target
1302
+ blockEndStateNum = decisionStartState.endState.stateNumber
1303
+ blockEndState = self.atn.states[blockEndStateNum]
1304
+
1305
+ # Verify that the top of each stack context leads to loop entry/exit
1306
+ # state through epsilon edges and w/o leaving rule.
1307
+ for i in range(0, numCtxs): # for each stack context
1308
+ returnStateNumber = config.context.getReturnState(i)
1309
+ returnState = self.atn.states[returnStateNumber]
1310
+ # all states must have single outgoing epsilon edge
1311
+ if len(returnState.transitions) != 1 or not returnState.transitions[0].isEpsilon:
1312
+ return False
1313
+
1314
+ # Look for prefix op case like 'not expr', (' type ')' expr
1315
+ returnStateTarget = returnState.transitions[0].target
1316
+ if returnState.stateType == ATNState.BLOCK_END and returnStateTarget is p:
1317
+ continue
1318
+
1319
+ # Look for 'expr op expr' or case where expr's return state is block end
1320
+ # of (...)* internal block; the block end points to loop back
1321
+ # which points to p but we don't need to check that
1322
+ if returnState is blockEndState:
1323
+ continue
1324
+
1325
+ # Look for ternary expr ? expr : expr. The return state points at block end,
1326
+ # which points at loop entry state
1327
+ if returnStateTarget is blockEndState:
1328
+ continue
1329
+
1330
+ # Look for complex prefix 'between expr and expr' case where 2nd expr's
1331
+ # return state points at block end state of (...)* internal block
1332
+ if returnStateTarget.stateType == ATNState.BLOCK_END \
1333
+ and len(returnStateTarget.transitions) == 1 \
1334
+ and returnStateTarget.transitions[0].isEpsilon \
1335
+ and returnStateTarget.transitions[0].target is p:
1336
+ continue
1337
+
1338
+ # anything else ain't conforming
1339
+ return False
1340
+
1341
+ return True
1342
+
1343
+
1344
+ def getRuleName(self, index:int):
1345
+ if self.parser is not None and index>=0:
1346
+ return self.parser.ruleNames[index]
1347
+ else:
1348
+ return "<rule " + str(index) + ">"
1349
+
1350
+ epsilonTargetMethods = dict()
1351
+ epsilonTargetMethods[Transition.RULE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1352
+ sim.ruleTransition(config, t)
1353
+ epsilonTargetMethods[Transition.PRECEDENCE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1354
+ sim.precedenceTransition(config, t, collectPredicates, inContext, fullCtx)
1355
+ epsilonTargetMethods[Transition.PREDICATE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1356
+ sim.predTransition(config, t, collectPredicates, inContext, fullCtx)
1357
+ epsilonTargetMethods[Transition.ACTION] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1358
+ sim.actionTransition(config, t)
1359
+ epsilonTargetMethods[Transition.EPSILON] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1360
+ ATNConfig(state=t.target, config=config)
1361
+ epsilonTargetMethods[Transition.ATOM] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1362
+ ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
1363
+ epsilonTargetMethods[Transition.RANGE] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1364
+ ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
1365
+ epsilonTargetMethods[Transition.SET] = lambda sim, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon: \
1366
+ ATNConfig(state=t.target, config=config) if treatEofAsEpsilon and t.matches(Token.EOF, 0, 1) else None
1367
+
1368
+ def getEpsilonTarget(self, config:ATNConfig, t:Transition, collectPredicates:bool, inContext:bool, fullCtx:bool, treatEofAsEpsilon:bool):
1369
+ m = self.epsilonTargetMethods.get(t.serializationType, None)
1370
+ if m is None:
1371
+ return None
1372
+ else:
1373
+ return m(self, config, t, collectPredicates, inContext, fullCtx, treatEofAsEpsilon)
1374
+
1375
+ def actionTransition(self, config:ATNConfig, t:ActionTransition):
1376
+ if ParserATNSimulator.debug:
1377
+ print("ACTION edge " + str(t.ruleIndex) + ":" + str(t.actionIndex))
1378
+ return ATNConfig(state=t.target, config=config)
1379
+
1380
+ def precedenceTransition(self, config:ATNConfig, pt:PrecedencePredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
1381
+ if ParserATNSimulator.debug:
1382
+ print("PRED (collectPredicates=" + str(collectPredicates) + ") " +
1383
+ str(pt.precedence) + ">=_p, ctx dependent=true")
1384
+ if self.parser is not None:
1385
+ print("context surrounding pred is " + str(self.parser.getRuleInvocationStack()))
1386
+
1387
+ c = None
1388
+ if collectPredicates and inContext:
1389
+ if fullCtx:
1390
+ # In full context mode, we can evaluate predicates on-the-fly
1391
+ # during closure, which dramatically reduces the size of
1392
+ # the config sets. It also obviates the need to test predicates
1393
+ # later during conflict resolution.
1394
+ currentPosition = self._input.index
1395
+ self._input.seek(self._startIndex)
1396
+ predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext)
1397
+ self._input.seek(currentPosition)
1398
+ if predSucceeds:
1399
+ c = ATNConfig(state=pt.target, config=config) # no pred context
1400
+ else:
1401
+ newSemCtx = andContext(config.semanticContext, pt.getPredicate())
1402
+ c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config)
1403
+ else:
1404
+ c = ATNConfig(state=pt.target, config=config)
1405
+
1406
+ if ParserATNSimulator.debug:
1407
+ print("config from pred transition=" + str(c))
1408
+ return c
1409
+
1410
+ def predTransition(self, config:ATNConfig, pt:PredicateTransition, collectPredicates:bool, inContext:bool, fullCtx:bool):
1411
+ if ParserATNSimulator.debug:
1412
+ print("PRED (collectPredicates=" + str(collectPredicates) + ") " + str(pt.ruleIndex) +
1413
+ ":" + str(pt.predIndex) + ", ctx dependent=" + str(pt.isCtxDependent))
1414
+ if self.parser is not None:
1415
+ print("context surrounding pred is " + str(self.parser.getRuleInvocationStack()))
1416
+
1417
+ c = None
1418
+ if collectPredicates and (not pt.isCtxDependent or (pt.isCtxDependent and inContext)):
1419
+ if fullCtx:
1420
+ # In full context mode, we can evaluate predicates on-the-fly
1421
+ # during closure, which dramatically reduces the size of
1422
+ # the config sets. It also obviates the need to test predicates
1423
+ # later during conflict resolution.
1424
+ currentPosition = self._input.index
1425
+ self._input.seek(self._startIndex)
1426
+ predSucceeds = pt.getPredicate().eval(self.parser, self._outerContext)
1427
+ self._input.seek(currentPosition)
1428
+ if predSucceeds:
1429
+ c = ATNConfig(state=pt.target, config=config) # no pred context
1430
+ else:
1431
+ newSemCtx = andContext(config.semanticContext, pt.getPredicate())
1432
+ c = ATNConfig(state=pt.target, semantic=newSemCtx, config=config)
1433
+ else:
1434
+ c = ATNConfig(state=pt.target, config=config)
1435
+
1436
+ if ParserATNSimulator.debug:
1437
+ print("config from pred transition=" + str(c))
1438
+ return c
1439
+
1440
+ def ruleTransition(self, config:ATNConfig, t:RuleTransition):
1441
+ if ParserATNSimulator.debug:
1442
+ print("CALL rule " + self.getRuleName(t.target.ruleIndex) + ", ctx=" + str(config.context))
1443
+ returnState = t.followState
1444
+ newContext = SingletonPredictionContext.create(config.context, returnState.stateNumber)
1445
+ return ATNConfig(state=t.target, context=newContext, config=config )
1446
+
1447
+ def getConflictingAlts(self, configs:ATNConfigSet):
1448
+ altsets = PredictionMode.getConflictingAltSubsets(configs)
1449
+ return PredictionMode.getAlts(altsets)
1450
+
1451
+ # Sam pointed out a problem with the previous definition, v3, of
1452
+ # ambiguous states. If we have another state associated with conflicting
1453
+ # alternatives, we should keep going. For example, the following grammar
1454
+ #
1455
+ # s : (ID | ID ID?) ';' ;
1456
+ #
1457
+ # When the ATN simulation reaches the state before ';', it has a DFA
1458
+ # state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
1459
+ # 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
1460
+ # because alternative to has another way to continue, via [6|2|[]].
1461
+ # The key is that we have a single state that has config's only associated
1462
+ # with a single alternative, 2, and crucially the state transitions
1463
+ # among the configurations are all non-epsilon transitions. That means
1464
+ # we don't consider any conflicts that include alternative 2. So, we
1465
+ # ignore the conflict between alts 1 and 2. We ignore a set of
1466
+ # conflicting alts when there is an intersection with an alternative
1467
+ # associated with a single alt state in the state&rarr;config-list map.
1468
+ #
1469
+ # It's also the case that we might have two conflicting configurations but
1470
+ # also a 3rd nonconflicting configuration for a different alternative:
1471
+ # [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
1472
+ #
1473
+ # a : A | A | A B ;
1474
+ #
1475
+ # After matching input A, we reach the stop state for rule A, state 1.
1476
+ # State 8 is the state right before B. Clearly alternatives 1 and 2
1477
+ # conflict and no amount of further lookahead will separate the two.
1478
+ # However, alternative 3 will be able to continue and so we do not
1479
+ # stop working on this state. In the previous example, we're concerned
1480
+ # with states associated with the conflicting alternatives. Here alt
1481
+ # 3 is not associated with the conflicting configs, but since we can continue
1482
+ # looking for input reasonably, I don't declare the state done. We
1483
+ # ignore a set of conflicting alts when we have an alternative
1484
+ # that we still need to pursue.
1485
+ #
1486
+
1487
+ def getConflictingAltsOrUniqueAlt(self, configs:ATNConfigSet):
1488
+ conflictingAlts = None
1489
+ if configs.uniqueAlt!= ATN.INVALID_ALT_NUMBER:
1490
+ conflictingAlts = set()
1491
+ conflictingAlts.add(configs.uniqueAlt)
1492
+ else:
1493
+ conflictingAlts = configs.conflictingAlts
1494
+ return conflictingAlts
1495
+
1496
+ def getTokenName(self, t:int):
1497
+ if t==Token.EOF:
1498
+ return "EOF"
1499
+ if self.parser is not None and \
1500
+ self.parser.literalNames is not None and \
1501
+ t < len(self.parser.literalNames):
1502
+ return self.parser.literalNames[t] + "<" + str(t) + ">"
1503
+ else:
1504
+ return str(t)
1505
+
1506
+ def getLookaheadName(self, input:TokenStream):
1507
+ return self.getTokenName(input.LA(1))
1508
+
1509
+ # Used for debugging in adaptivePredict around execATN but I cut
1510
+ # it out for clarity now that alg. works well. We can leave this
1511
+ # "dead" code for a bit.
1512
+ #
1513
+ def dumpDeadEndConfigs(self, nvae:NoViableAltException):
1514
+ print("dead end configs: ")
1515
+ for c in nvae.getDeadEndConfigs():
1516
+ trans = "no edges"
1517
+ if len(c.state.transitions)>0:
1518
+ t = c.state.transitions[0]
1519
+ if isinstance(t, AtomTransition):
1520
+ trans = "Atom "+ self.getTokenName(t.label)
1521
+ elif isinstance(t, SetTransition):
1522
+ neg = isinstance(t, NotSetTransition)
1523
+ trans = ("~" if neg else "")+"Set "+ str(t.set)
1524
+ print(c.toString(self.parser, True) + ":" + trans, file=sys.stderr)
1525
+
1526
+ def noViableAlt(self, input:TokenStream, outerContext:ParserRuleContext, configs:ATNConfigSet, startIndex:int):
1527
+ return NoViableAltException(self.parser, input, input.get(startIndex), input.LT(1), configs, outerContext)
1528
+
1529
+ def getUniqueAlt(self, configs:ATNConfigSet):
1530
+ alt = ATN.INVALID_ALT_NUMBER
1531
+ for c in configs:
1532
+ if alt == ATN.INVALID_ALT_NUMBER:
1533
+ alt = c.alt # found first alt
1534
+ elif c.alt!=alt:
1535
+ return ATN.INVALID_ALT_NUMBER
1536
+ return alt
1537
+
1538
+ #
1539
+ # Add an edge to the DFA, if possible. This method calls
1540
+ # {@link #addDFAState} to ensure the {@code to} state is present in the
1541
+ # DFA. If {@code from} is {@code null}, or if {@code t} is outside the
1542
+ # range of edges that can be represented in the DFA tables, this method
1543
+ # returns without adding the edge to the DFA.
1544
+ #
1545
+ # <p>If {@code to} is {@code null}, this method returns {@code null}.
1546
+ # Otherwise, this method returns the {@link DFAState} returned by calling
1547
+ # {@link #addDFAState} for the {@code to} state.</p>
1548
+ #
1549
+ # @param dfa The DFA
1550
+ # @param from The source state for the edge
1551
+ # @param t The input symbol
1552
+ # @param to The target state for the edge
1553
+ #
1554
+ # @return If {@code to} is {@code null}, this method returns {@code null};
1555
+ # otherwise this method returns the result of calling {@link #addDFAState}
1556
+ # on {@code to}
1557
+ #
1558
+ def addDFAEdge(self, dfa:DFA, from_:DFAState, t:int, to:DFAState):
1559
+ if ParserATNSimulator.debug:
1560
+ print("EDGE " + str(from_) + " -> " + str(to) + " upon " + self.getTokenName(t))
1561
+
1562
+ if to is None:
1563
+ return None
1564
+
1565
+ to = self.addDFAState(dfa, to) # used existing if possible not incoming
1566
+ if from_ is None or t < -1 or t > self.atn.maxTokenType:
1567
+ return to
1568
+
1569
+ if from_.edges is None:
1570
+ from_.edges = [None] * (self.atn.maxTokenType + 2)
1571
+ from_.edges[t+1] = to # connect
1572
+
1573
+ if ParserATNSimulator.debug:
1574
+ names = None if self.parser is None else self.parser.literalNames
1575
+ print("DFA=\n" + dfa.toString(names))
1576
+
1577
+ return to
1578
+
1579
+ #
1580
+ # Add state {@code D} to the DFA if it is not already present, and return
1581
+ # the actual instance stored in the DFA. If a state equivalent to {@code D}
1582
+ # is already in the DFA, the existing state is returned. Otherwise this
1583
+ # method returns {@code D} after adding it to the DFA.
1584
+ #
1585
+ # <p>If {@code D} is {@link #ERROR}, this method returns {@link #ERROR} and
1586
+ # does not change the DFA.</p>
1587
+ #
1588
+ # @param dfa The dfa
1589
+ # @param D The DFA state to add
1590
+ # @return The state stored in the DFA. This will be either the existing
1591
+ # state if {@code D} is already in the DFA, or {@code D} itself if the
1592
+ # state was not already present.
1593
+ #
1594
+ def addDFAState(self, dfa:DFA, D:DFAState):
1595
+ if D is self.ERROR:
1596
+ return D
1597
+
1598
+
1599
+ existing = dfa.states.get(D, None)
1600
+ if existing is not None:
1601
+ return existing
1602
+
1603
+ D.stateNumber = len(dfa.states)
1604
+ if not D.configs.readonly:
1605
+ D.configs.optimizeConfigs(self)
1606
+ D.configs.setReadonly(True)
1607
+ dfa.states[D] = D
1608
+ if ParserATNSimulator.debug:
1609
+ print("adding new DFA state: " + str(D))
1610
+ return D
1611
+
1612
+ def reportAttemptingFullContext(self, dfa:DFA, conflictingAlts:set, configs:ATNConfigSet, startIndex:int, stopIndex:int):
1613
+ if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
1614
+ print("reportAttemptingFullContext decision=" + str(dfa.decision) + ":" + str(configs) +
1615
+ ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
1616
+ if self.parser is not None:
1617
+ self.parser.getErrorListenerDispatch().reportAttemptingFullContext(self.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
1618
+
1619
+ def reportContextSensitivity(self, dfa:DFA, prediction:int, configs:ATNConfigSet, startIndex:int, stopIndex:int):
1620
+ if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
1621
+ print("reportContextSensitivity decision=" + str(dfa.decision) + ":" + str(configs) +
1622
+ ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
1623
+ if self.parser is not None:
1624
+ self.parser.getErrorListenerDispatch().reportContextSensitivity(self.parser, dfa, startIndex, stopIndex, prediction, configs)
1625
+
1626
+ # If context sensitive parsing, we know it's ambiguity not conflict#
1627
+ def reportAmbiguity(self, dfa:DFA, D:DFAState, startIndex:int, stopIndex:int,
1628
+ exact:bool, ambigAlts:set, configs:ATNConfigSet ):
1629
+ if ParserATNSimulator.debug or ParserATNSimulator.retry_debug:
1630
+ # ParserATNPathFinder finder = new ParserATNPathFinder(parser, atn);
1631
+ # int i = 1;
1632
+ # for (Transition t : dfa.atnStartState.transitions) {
1633
+ # print("ALT "+i+"=");
1634
+ # print(startIndex+".."+stopIndex+", len(input)="+parser.getInputStream().size());
1635
+ # TraceTree path = finder.trace(t.target, parser.getContext(), (TokenStream)parser.getInputStream(),
1636
+ # startIndex, stopIndex);
1637
+ # if ( path!=null ) {
1638
+ # print("path = "+path.toStringTree());
1639
+ # for (TraceTree leaf : path.leaves) {
1640
+ # List<ATNState> states = path.getPathToNode(leaf);
1641
+ # print("states="+states);
1642
+ # }
1643
+ # }
1644
+ # i++;
1645
+ # }
1646
+ print("reportAmbiguity " + str(ambigAlts) + ":" + str(configs) +
1647
+ ", input=" + self.parser.getTokenStream().getText(startIndex, stopIndex))
1648
+ if self.parser is not None:
1649
+ self.parser.getErrorListenerDispatch().reportAmbiguity(self.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/PredictionMode.py ADDED
@@ -0,0 +1,499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+ #
7
+ # This enumeration defines the prediction modes available in ANTLR 4 along with
8
+ # utility methods for analyzing configuration sets for conflicts and/or
9
+ # ambiguities.
10
+
11
+
12
+ from enum import Enum
13
+ from antlr4.atn.ATN import ATN
14
+ from antlr4.atn.ATNConfig import ATNConfig
15
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
16
+ from antlr4.atn.ATNState import RuleStopState
17
+ from antlr4.atn.SemanticContext import SemanticContext
18
+
19
PredictionMode = None  # forward declaration so methods can annotate parameters with the class itself

class PredictionMode(Enum):
    #
    # This enumeration defines the prediction modes available in ANTLR 4
    # along with utility methods for analyzing configuration sets for
    # conflicts and/or ambiguities.
    #

    # The SLL(*) prediction mode. Ignores the current parser context when
    # making predictions. Fastest mode and correct for many grammars, but a
    # reported syntax error may mean either a real input error or that the
    # grammar/input combination needs the stronger {@link #LL} mode. No
    # guarantees are made for syntactically-incorrect inputs.
    SLL = 0

    # The LL(*) prediction mode. Uses the current parser context to resolve
    # SLL conflicts. Fastest mode that guarantees correct parse results for
    # every syntactically correct grammar/input combination, although truly
    # ambiguous decisions might not report the precise set of ambiguous
    # alternatives. No guarantees for syntactically-incorrect inputs.
    LL = 1

    # The LL(*) prediction mode with exact ambiguity detection. In addition
    # to the {@link #LL} correctness guarantees, instructs prediction to
    # compute the complete and exact set of ambiguous alternatives for every
    # ambiguous decision. Useful while developing a grammar; avoid it when
    # exact results are unnecessary, because of the extra cost.
    LL_EXACT_AMBIG_DETECTION = 2

    # Computes the SLL prediction termination condition, covering both
    # combined SLL+LL parsing (stop and fail over to LL as soon as every
    # conflicting subset lacks a single-alternative state) and pure SLL
    # parsing (same heuristic after merging stack contexts that differ only
    # by semantic predicate). SLL termination can ignore semantic predicates
    # entirely because SLL defers predicate evaluation to DFA stop states.
    @classmethod
    def hasSLLConflictTerminatingPrediction(cls, mode:PredictionMode, configs:ATNConfigSet):
        # When every configuration sits in a rule stop state, none of them
        # can match additional input, so prediction must terminate.
        if cls.allConfigsInRuleStopStates(configs):
            return True

        if mode == PredictionMode.SLL:
            # Pure SLL: strip semantic predicates so configurations that
            # differ only by predicate merge their stack contexts. Combined
            # SLL+LL skips this - it would fail over to full LL anyway,
            # which evaluates predicates on the fly.
            if configs.hasSemanticContext:
                stripped = ATNConfigSet()
                for cfg in configs:
                    stripped.add(ATNConfig(config=cfg, semantic=SemanticContext.NONE))
                configs = stripped

        subsets = cls.getConflictingAltSubsets(configs)
        # Heuristic: stop once some subset conflicts, unless a state is
        # associated with exactly one alternative - such a state means more
        # lookahead might still disambiguate, so keep going.
        return cls.hasConflictingAltSet(subsets) and not cls.hasStateAssociatedWithOneAlt(configs)

    # Checks whether any configuration in {@code configs} is in a
    # {@link RuleStopState}, i.e. has reached the end of the decision rule
    # (local context) or of the start rule (full context).
    @classmethod
    def hasConfigInRuleStopState(cls, configs:ATNConfigSet):
        for cfg in configs:
            if isinstance(cfg.state, RuleStopState):
                return True
        return False

    # Checks whether every configuration in {@code configs} is in a
    # {@link RuleStopState}.
    @classmethod
    def allConfigsInRuleStopStates(cls, configs:ATNConfigSet):
        for cfg in configs:
            if not isinstance(cfg.state, RuleStopState):
                return False
        return True

    # Full LL prediction termination: prediction may stop once every
    # conflicting subset resolves to the same minimum alternative. Returns
    # that alternative, or {@link ATN#INVALID_ALT_NUMBER} when more
    # lookahead is required to decide.
    @classmethod
    def resolvesToJustOneViableAlt(cls, altsets:list):
        return cls.getSingleViableAlt(altsets)

    # True when every alternative subset in {@code altsets} contains more
    # than one alternative (i.e. no subset is a singleton).
    @classmethod
    def allSubsetsConflict(cls, altsets:list):
        return not cls.hasNonConflictingAltSet(altsets)

    # True when some subset in {@code altsets} contains exactly one
    # alternative.
    @classmethod
    def hasNonConflictingAltSet(cls, altsets:list):
        for alts in altsets:
            if len(alts) == 1:
                return True
        return False

    # True when some subset in {@code altsets} contains more than one
    # alternative.
    @classmethod
    def hasConflictingAltSet(cls, altsets:list):
        for alts in altsets:
            if len(alts) > 1:
                return True
        return False

    # True when all subsets in {@code altsets} equal one another
    # (vacuously true for an empty collection).
    @classmethod
    def allSubsetsEqual(cls, altsets:list):
        if not altsets:
            return True
        iterator = iter(altsets)
        reference = next(iterator)
        for alts in iterator:
            if alts != reference:
                return False
        return True

    # Returns the single alternative predicted by all subsets in
    # {@code altsets}, or {@link ATN#INVALID_ALT_NUMBER} when their union
    # contains more than one alternative.
    @classmethod
    def getUniqueAlt(cls, altsets:list):
        merged = cls.getAlts(altsets)
        if len(merged) == 1:
            return next(iter(merged))
        return ATN.INVALID_ALT_NUMBER

    # Union of all alternative subsets - the complete set of represented
    # alternatives in {@code altsets}.
    @classmethod
    def getAlts(cls, altsets:list):
        return set.union(*altsets)

    # Computes the conflicting alt subsets from a configuration set: every
    # configuration contributes its alternative to the subset keyed by its
    # (state number, context) pair - alternatives and predicates do not
    # distinguish the groups.
    @classmethod
    def getConflictingAltSubsets(cls, configs:ATNConfigSet):
        configToAlts = dict()
        for cfg in configs:
            key = hash((cfg.state.stateNumber, cfg.context))
            configToAlts.setdefault(key, set()).add(cfg.alt)
        return configToAlts.values()

    # Maps each ATN state to the set of alternatives predicted through it:
    # map[c.state] U= c.alt for every configuration c in {@code configs}.
    @classmethod
    def getStateToAltMap(cls, configs:ATNConfigSet):
        stateToAlts = dict()
        for cfg in configs:
            stateToAlts.setdefault(cfg.state, set()).add(cfg.alt)
        return stateToAlts

    # True when some state is associated with exactly one alternative.
    @classmethod
    def hasStateAssociatedWithOneAlt(cls, configs:ATNConfigSet):
        for alts in cls.getStateToAltMap(configs).values():
            if len(alts) == 1:
                return True
        return False

    # Resolves each subset to its minimum alternative. If every subset
    # agrees on a single alternative it is returned; otherwise returns
    # {@link ATN#INVALID_ALT_NUMBER} to request more lookahead.
    @classmethod
    def getSingleViableAlt(cls, altsets:list):
        viableAlts = set()
        for alts in altsets:
            viableAlts.add(min(alts))
            if len(viableAlts) > 1:  # more than one viable alternative
                return ATN.INVALID_ALT_NUMBER
        return min(viableAlts)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/SemanticContext.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ # A tree structure used to record the semantic context in which
8
+ # an ATN configuration is valid. It's either a single predicate,
9
+ # a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
10
+ #
11
+ # <p>I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
12
+ # {@link SemanticContext} within the scope of this outer class.</p>
13
+ #
14
+ from antlr4.Recognizer import Recognizer
15
+ from antlr4.RuleContext import RuleContext
16
+ from io import StringIO
17
+
18
+
19
class SemanticContext(object):
    # A tree structure used to record the semantic context in which an ATN
    # configuration is valid. It is either a single predicate, a conjunction
    # {@code p1&&p2}, or a sum of products {@code p1||p2}.

    # The default {@link SemanticContext}, semantically equivalent to a
    # predicate of the form {@code {true}?}. Bound to a Predicate instance
    # at the bottom of this module, after Predicate is declared.
    NONE = None

    # Evaluate this context against the recognizer. Context-independent
    # predicates are evaluated with a null local context; context-dependent
    # ones need the outer context so references such as $arg resolve as
    # _localctx.arg. Subclasses override; the base implementation does
    # nothing.
    def eval(self, parser:Recognizer , outerContext:RuleContext ):
        pass

    # Evaluate any precedence predicates and reduce the result:
    #   {@link #NONE} - the context simplified to {@code true},
    #   {@code None}  - the context simplified to {@code false},
    #   {@code self}  - unchanged by precedence evaluation,
    #   other         - a new, simplified semantic context.
    # The base context is never affected by precedence predicates, so it
    # returns itself unchanged.
    def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
        return self
62
+
63
+ # need forward declaration
64
AND = None  # forward declaration; the real AND class is defined further down

# Conjoin two semantic contexts. {@code None} and {@link SemanticContext#NONE}
# (always true) are identities for conjunction, and an AND whose operand set
# collapses to a single element is unwrapped to that operand.
def andContext(a:SemanticContext, b:SemanticContext):
    if a is None or a is SemanticContext.NONE:
        return b
    if b is None or b is SemanticContext.NONE:
        return a
    conjunction = AND(a, b)
    return conjunction.opnds[0] if len(conjunction.opnds) == 1 else conjunction
76
+
77
+ # need forward declaration
78
OR = None  # forward declaration; the real OR class is defined further down

# Disjoin two semantic contexts. {@code None} is the identity for
# disjunction, {@link SemanticContext#NONE} (always true) absorbs the whole
# expression, and an OR whose operand set collapses to a single element is
# unwrapped to that operand.
def orContext(a:SemanticContext, b:SemanticContext):
    if a is None:
        return b
    if b is None:
        return a
    if a is SemanticContext.NONE or b is SemanticContext.NONE:
        return SemanticContext.NONE
    disjunction = OR(a, b)
    return disjunction.opnds[0] if len(disjunction.opnds) == 1 else disjunction
92
+
93
# Return, as a list, only the PrecedencePredicate instances contained in the
# given collection.
def filterPrecedencePredicates(collection:set):
    matches = []
    for context in collection:
        if isinstance(context, PrecedencePredicate):
            matches.append(context)
    return matches
95
+
96
+
97
class Predicate(SemanticContext):
    # A single grammar predicate {@code {...}?}, identified by a rule index
    # and a predicate index, evaluated by calling back into the recognizer.
    __slots__ = ('ruleIndex', 'predIndex', 'isCtxDependent')

    def __init__(self, ruleIndex:int=-1, predIndex:int=-1, isCtxDependent:bool=False):
        self.ruleIndex = ruleIndex
        self.predIndex = predIndex
        # isCtxDependent: the predicate references the local context
        # (e.g. a $i reference) and therefore must be evaluated with one.
        self.isCtxDependent = isCtxDependent

    def eval(self, parser:Recognizer , outerContext:RuleContext ):
        # Context-dependent predicates receive the outer context; all
        # others are evaluated with no local context at all.
        localctx = outerContext if self.isCtxDependent else None
        return parser.sempred(localctx, self.ruleIndex, self.predIndex)

    def __hash__(self):
        return hash((self.ruleIndex, self.predIndex, self.isCtxDependent))

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, Predicate):
            return False
        return (self.ruleIndex, self.predIndex, self.isCtxDependent) == \
               (other.ruleIndex, other.predIndex, other.isCtxDependent)

    def __str__(self):
        return "{" + str(self.ruleIndex) + ":" + str(self.predIndex) + "}?"
123
+
124
+
125
class PrecedencePredicate(SemanticContext):
    # Predicate guarding a left-recursive alternative: true when the
    # recognizer's current precedence level admits {@code precedence}.

    def __init__(self, precedence:int=0):
        self.precedence = precedence

    def eval(self, parser:Recognizer , outerContext:RuleContext ):
        return parser.precpred(outerContext, self.precedence)

    def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
        # Resolves immediately: to the always-true context when the
        # precedence check passes, otherwise to false ({@code None}).
        if parser.precpred(outerContext, self.precedence):
            return SemanticContext.NONE
        return None

    def __lt__(self, other):
        # Ordering by precedence lets AND/OR reduce to the min/max predicate.
        return self.precedence < other.precedence

    def __hash__(self):
        # All instances share one hash bucket; __eq__ disambiguates.
        return 31

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, PrecedencePredicate):
            return False
        return self.precedence == other.precedence
152
+
153
# A semantic context which is true whenever none of the contained contexts
# is false.
del AND  # drop the forward declaration before binding the real class
class AND(SemanticContext):
    __slots__ = 'opnds'

    def __init__(self, a:SemanticContext, b:SemanticContext):
        # Flatten nested ANDs into one operand set so structurally equal
        # conjunctions compare equal regardless of construction order.
        operands = set()
        for operand in (a, b):
            if isinstance(operand, AND):
                operands.update(operand.opnds)
            else:
                operands.add(operand)

        precedencePredicates = filterPrecedencePredicates(operands)
        if len(precedencePredicates) > 0:
            # In a conjunction only the transition with the lowest
            # precedence matters.
            # NOTE(review): unlike the Java runtime, the filter above does
            # not remove the predicates from `operands`, so this add is a
            # no-op on the set - confirm whether the reduction is intended.
            operands.add(min(precedencePredicates))

        self.opnds = list(operands)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, AND):
            return False
        return self.opnds == other.opnds

    def __hash__(self):
        # Fold operand hashes in list order, then tag with the operator
        # name so AND and OR over the same operands hash differently.
        folded = 0
        for operand in self.opnds:
            folded = hash((folded, operand))
        return hash((folded, "AND"))

    # {@inheritDoc}
    # The evaluation of predicates by this context is short-circuiting,
    # but unordered.
    def eval(self, parser:Recognizer, outerContext:RuleContext):
        for operand in self.opnds:
            if not operand.eval(parser, outerContext):
                return False
        return True

    def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
        differs = False
        remaining = []
        for context in self.opnds:
            evaluated = context.evalPrecedence(parser, outerContext)
            if evaluated is not context:
                differs = True
            if evaluated is None:
                # The AND context is false if any element is false.
                return None
            if evaluated is not SemanticContext.NONE:
                # Skip true elements; they cannot affect the conjunction.
                remaining.append(evaluated)

        if not differs:
            return self

        if len(remaining) == 0:
            # All elements were true, so the AND context is true.
            return SemanticContext.NONE

        # Re-conjoin whatever survived the reduction.
        result = remaining[0]
        for operand in remaining[1:]:
            result = andContext(result, operand)
        return result

    def __str__(self):
        return "&&".join(str(operand) for operand in self.opnds)
237
+
238
#
# A semantic context which is true whenever at least one of the contained
# contexts is true.
del OR  # drop the forward declaration before binding the real class
class OR(SemanticContext):
    __slots__ = 'opnds'

    def __init__(self, a:SemanticContext, b:SemanticContext):
        # Flatten nested ORs into one operand set so structurally equal
        # disjunctions compare equal regardless of construction order.
        operands = set()
        for operand in (a, b):
            if isinstance(operand, OR):
                operands.update(operand.opnds)
            else:
                operands.add(operand)

        precedencePredicates = filterPrecedencePredicates(operands)
        if len(precedencePredicates) > 0:
            # In a disjunction only the transition with the highest
            # precedence matters.
            # NOTE(review): unlike the Java runtime, the filter above does
            # not remove the predicates from `operands`, so this add is a
            # no-op on the set - confirm whether the reduction is intended.
            operands.add(sorted(precedencePredicates)[-1])

        self.opnds = list(operands)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, OR):
            return False
        return self.opnds == other.opnds

    def __hash__(self):
        # Fold operand hashes in list order, then tag with the operator
        # name so AND and OR over the same operands hash differently.
        folded = 0
        for operand in self.opnds:
            folded = hash((folded, operand))
        return hash((folded, "OR"))

    # The evaluation of predicates by this context is short-circuiting,
    # but unordered.
    def eval(self, parser:Recognizer, outerContext:RuleContext):
        for operand in self.opnds:
            if operand.eval(parser, outerContext):
                return True
        return False

    def evalPrecedence(self, parser:Recognizer, outerContext:RuleContext):
        differs = False
        remaining = []
        for context in self.opnds:
            evaluated = context.evalPrecedence(parser, outerContext)
            if evaluated is not context:
                differs = True
            if evaluated is SemanticContext.NONE:
                # The OR context is true if any element is true.
                return SemanticContext.NONE
            if evaluated is not None:
                # Skip false elements; they cannot affect the disjunction.
                remaining.append(evaluated)

        if not differs:
            return self

        if len(remaining) == 0:
            # All elements were false, so the OR context is false.
            return None

        # Re-disjoin whatever survived the reduction.
        result = remaining[0]
        for operand in remaining[1:]:
            result = orContext(result, operand)
        return result

    def __str__(self):
        return "||".join(str(operand) for operand in self.opnds)
321
+
322
+
323
# Initialize the always-true singleton here, at module bottom, because
# Predicate is only declared after the SemanticContext base class; a
# parameterless Predicate serves as the {true}? context.
SemanticContext.NONE = Predicate()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/Transition.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ # An ATN transition between any two ATN states. Subclasses define
8
+ # atom, set, epsilon, action, predicate, rule transitions.
9
+ #
10
+ # <p>This is a one way link. It emanates from a state (usually via a list of
11
+ # transitions) and has a target state.</p>
12
+ #
13
+ # <p>Since we never have to change the ATN transitions once we construct it,
14
+ # we can fix these transitions as specific classes. The DFA transitions
15
+ # on the other hand need to update the labels as it adds transitions to
16
+ # the states. We'll use the term Edge for the DFA to distinguish them from
17
+ # ATN transitions.</p>
18
+ #
19
+ from antlr4.IntervalSet import IntervalSet
20
+ from antlr4.Token import Token
21
+
22
+ # need forward declarations
23
+ from antlr4.atn.SemanticContext import Predicate, PrecedencePredicate
24
+
25
+ ATNState = None
26
+ RuleStartState = None
27
+
28
class Transition (object):
    """An ATN transition between any two ATN states.

    This is a one-way link: it emanates from a state and has a target state.
    Subclasses define atom, set, epsilon, action, predicate and rule
    transitions; the integer constants below identify each subclass in the
    serialized ATN.
    """
    __slots__ = ('target','isEpsilon','label')

    # constants for serialization
    EPSILON = 1
    RANGE = 2
    RULE = 3
    PREDICATE = 4 # e.g., {isType(input.LT(1))}?
    ATOM = 5
    ACTION = 6
    SET = 7 # ~(A|B) or ~atom, wildcard, which convert to next 2
    NOT_SET = 8
    WILDCARD = 9
    PRECEDENCE = 10

    # Human-readable names indexed by the serialization constants above.
    serializationNames = [
        "INVALID",
        "EPSILON",
        "RANGE",
        "RULE",
        "PREDICATE",
        "ATOM",
        "ACTION",
        "SET",
        "NOT_SET",
        "WILDCARD",
        "PRECEDENCE"
    ]

    # Maps each Transition subclass to its serialization constant; populated
    # at the bottom of this module once all subclasses exist.
    serializationTypes = dict()

    def __init__(self, target:ATNState):
        """Create a transition to the given (non-null) target state."""
        # The target of this transition.
        if target is None:
            raise Exception("target cannot be null.")
        self.target = target
        # Are we epsilon, action, sempred?
        self.isEpsilon = False
        # IntervalSet of matched symbols; None for non-matching transitions.
        self.label = None
67
+
68
+
69
+ # TODO: make all transitions sets? no, should remove set edges
70
class AtomTransition(Transition):
    """Transition that matches exactly one token type / character value."""
    __slots__ = ('label_', 'serializationType')

    def __init__(self, target:ATNState, label:int):
        super().__init__(target)
        # The token type or character value; or, signifies special label.
        self.label_ = label
        self.label = self.makeLabel()
        self.serializationType = self.ATOM

    def makeLabel(self):
        """Wrap the single symbol in a one-element IntervalSet."""
        interval = IntervalSet()
        interval.addOne(self.label_)
        return interval

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """True iff the input symbol is exactly this transition's symbol."""
        return symbol == self.label_

    def __str__(self):
        return "%s" % self.label_
89
+
90
class RuleTransition(Transition):
    """Epsilon transition that invokes another rule (a rule reference)."""
    __slots__ = ('ruleIndex', 'precedence', 'followState', 'serializationType')

    def __init__(self, ruleStart:RuleStartState, ruleIndex:int, precedence:int, followState:ATNState):
        super().__init__(ruleStart)
        self.serializationType = self.RULE
        self.isEpsilon = True
        # ptr to the rule definition object for this rule ref
        self.ruleIndex = ruleIndex
        self.precedence = precedence
        # what node to begin computations following ref to rule
        self.followState = followState

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """Rule transitions never consume input."""
        return False
103
+
104
+
105
class EpsilonTransition(Transition):
    """Transition taken without consuming any input symbol."""
    __slots__ = ('serializationType', 'outermostPrecedenceReturn')

    def __init__(self, target, outermostPrecedenceReturn=-1):
        super().__init__(target)
        self.serializationType = self.EPSILON
        self.isEpsilon = True
        # Index of the outermost precedence-rule return, or -1 if none.
        self.outermostPrecedenceReturn = outermostPrecedenceReturn

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """Epsilon transitions never match input."""
        return False

    def __str__(self):
        return "epsilon"
119
+
120
class RangeTransition(Transition):
    """Transition matching any symbol in the inclusive range start..stop."""
    __slots__ = ('serializationType', 'start', 'stop')

    def __init__(self, target:ATNState, start:int, stop:int):
        super().__init__(target)
        self.serializationType = self.RANGE
        self.start = start
        self.stop = stop
        self.label = self.makeLabel()

    def makeLabel(self):
        """Build an IntervalSet covering the inclusive range."""
        interval = IntervalSet()
        interval.addRange(range(self.start, self.stop + 1))
        return interval

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """True iff the symbol lies within [start, stop]."""
        return self.start <= symbol <= self.stop

    def __str__(self):
        return f"'{chr(self.start)}'..'{chr(self.stop)}'"
140
+
141
class AbstractPredicateTransition(Transition):
    """Common base class for predicate and precedence-predicate transitions."""

    def __init__(self, target:ATNState):
        super().__init__(target)
145
+
146
+
147
class PredicateTransition(AbstractPredicateTransition):
    """Epsilon transition gated by a semantic predicate {...}?."""
    __slots__ = ('serializationType', 'ruleIndex', 'predIndex', 'isCtxDependent')

    def __init__(self, target:ATNState, ruleIndex:int, predIndex:int, isCtxDependent:bool):
        super().__init__(target)
        self.serializationType = self.PREDICATE
        self.isEpsilon = True
        self.ruleIndex = ruleIndex
        self.predIndex = predIndex
        # e.g., $i ref in pred
        self.isCtxDependent = isCtxDependent

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """Predicate transitions never consume input."""
        return False

    def getPredicate(self):
        """Return the SemanticContext predicate this transition represents."""
        return Predicate(self.ruleIndex, self.predIndex, self.isCtxDependent)

    def __str__(self):
        return f"pred_{self.ruleIndex}:{self.predIndex}"
166
+
167
class ActionTransition(Transition):
    """Epsilon transition that carries a lexer/parser action reference.

    ruleIndex/actionIndex locate the action; isCtxDependent marks actions
    that reference the surrounding context (e.g., a $i ref in a predicate).
    """
    __slots__ = ('serializationType', 'ruleIndex', 'actionIndex', 'isCtxDependent')

    def __init__(self, target:ATNState, ruleIndex:int, actionIndex:int=-1, isCtxDependent:bool=False):
        super().__init__(target)
        self.serializationType = self.ACTION
        self.ruleIndex = ruleIndex
        self.actionIndex = actionIndex
        self.isCtxDependent = isCtxDependent # e.g., $i ref in pred
        self.isEpsilon = True

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        # Action transitions never consume input.
        return False

    def __str__(self):
        # Bug fix: ruleIndex/actionIndex are ints and cannot be concatenated
        # to str directly (previously raised TypeError); convert explicitly.
        return "action_" + str(self.ruleIndex) + ":" + str(self.actionIndex)
183
+
184
+ # A transition containing a set of values.
185
class SetTransition(Transition):
    """A transition containing a set of values (matches set membership)."""
    __slots__ = 'serializationType'

    def __init__(self, target:ATNState, set:IntervalSet):
        super().__init__(target)
        self.serializationType = self.SET
        if set is None:
            # No set supplied: fall back to a set holding only INVALID_TYPE.
            self.label = IntervalSet()
            self.label.addRange(range(Token.INVALID_TYPE, Token.INVALID_TYPE + 1))
        else:
            self.label = set

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """True iff the symbol is a member of this transition's label set."""
        return symbol in self.label

    def __str__(self):
        return str(self.label)
202
+
203
class NotSetTransition(SetTransition):
    """Transition matching any in-vocabulary symbol NOT in the label set."""

    def __init__(self, target:ATNState, set:IntervalSet):
        super().__init__(target, set)
        self.serializationType = self.NOT_SET

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        # Bug fix: the previous super(type(self), self) form recurses
        # infinitely if this class is ever subclassed; the zero-argument
        # super() binds to the defining class (SetTransition) and is safe.
        return symbol >= minVocabSymbol \
            and symbol <= maxVocabSymbol \
            and not super().matches(symbol, minVocabSymbol, maxVocabSymbol)

    def __str__(self):
        return '~' + super().__str__()
216
+
217
+
218
class WildcardTransition(Transition):
    """Transition that matches any symbol within the vocabulary bounds."""
    __slots__ = 'serializationType'

    def __init__(self, target:ATNState):
        super().__init__(target)
        self.serializationType = self.WILDCARD

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        """True for every symbol in [minVocabSymbol, maxVocabSymbol]."""
        return minVocabSymbol <= symbol <= maxVocabSymbol

    def __str__(self):
        return "."
230
+
231
+
232
class PrecedencePredicateTransition(AbstractPredicateTransition):
    """Epsilon transition gated by a precedence check {precedence >= _p}?."""
    __slots__ = ('serializationType', 'precedence')

    def __init__(self, target:ATNState, precedence:int):
        super().__init__(target)
        self.serializationType = self.PRECEDENCE
        self.precedence = precedence
        self.isEpsilon = True

    def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int):
        # Precedence transitions never consume input.
        return False


    def getPredicate(self):
        """Return the PrecedencePredicate this transition represents."""
        return PrecedencePredicate(self.precedence)

    def __str__(self):
        # Bug fix: precedence is an int and cannot be concatenated to a str
        # (previously raised TypeError); convert it explicitly.
        return str(self.precedence) + " >= _p"
250
+
251
+
252
# Populate the subclass -> serialization-constant map now that all of the
# Transition subclasses have been defined.
Transition.serializationTypes = {
    EpsilonTransition: Transition.EPSILON,
    RangeTransition: Transition.RANGE,
    RuleTransition: Transition.RULE,
    PredicateTransition: Transition.PREDICATE,
    AtomTransition: Transition.ATOM,
    ActionTransition: Transition.ACTION,
    SetTransition: Transition.SET,
    NotSetTransition: Transition.NOT_SET,
    WildcardTransition: Transition.WILDCARD,
    PrecedencePredicateTransition: Transition.PRECEDENCE
}

# Drop the forward-declaration placeholders so the real classes, imported
# below, take their place in this module's namespace.
del ATNState
del RuleStartState
267
+
268
+ from antlr4.atn.ATNState import *
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/atn/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __author__ = 'ericvergnaud'
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFA.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ from antlr4.atn.ATNState import StarLoopEntryState
6
+
7
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
8
+ from antlr4.atn.ATNState import DecisionState
9
+ from antlr4.dfa.DFAState import DFAState
10
+ from antlr4.error.Errors import IllegalStateException
11
+
12
+
13
class DFA(object):
    """A DFA for one decision point, built lazily during prediction.

    Caches DFA states keyed by their ATN configuration set so prediction
    results can be reused across invocations of the same decision.
    """
    __slots__ = ('atnStartState', 'decision', '_states', 's0', 'precedenceDfa')

    def __init__(self, atnStartState:DecisionState, decision:int=0):
        # From which ATN state did we create this DFA?
        self.atnStartState = atnStartState
        self.decision = decision
        # A set of all DFA states. Use {@link Map} so we can get old state back
        # ({@link Set} only allows you to see if it's there).
        self._states = dict()
        self.s0 = None
        # {@code true} if this DFA is for a precedence decision; otherwise,
        # {@code false}. This is the backing field for {@link #isPrecedenceDfa},
        # {@link #setPrecedenceDfa}.
        self.precedenceDfa = False

        if isinstance(atnStartState, StarLoopEntryState):
            if atnStartState.isPrecedenceDecision:
                # For precedence decisions, s0 is not a real start state: its
                # edges list acts as a table mapping each precedence value to
                # that precedence's true start state.
                self.precedenceDfa = True
                precedenceState = DFAState(configs=ATNConfigSet())
                precedenceState.edges = []
                precedenceState.isAcceptState = False
                precedenceState.requiresFullContext = False
                self.s0 = precedenceState


    # Get the start state for a specific precedence value.
    #
    # @param precedence The current precedence.
    # @return The start state corresponding to the specified precedence, or
    # {@code null} if no start state exists for the specified precedence.
    #
    # @throws IllegalStateException if this is not a precedence DFA.
    # @see #isPrecedenceDfa()

    def getPrecedenceStartState(self, precedence:int):
        if not self.precedenceDfa:
            raise IllegalStateException("Only precedence DFAs may contain a precedence start state.")

        # s0.edges is never null for a precedence DFA
        if precedence < 0 or precedence >= len(self.s0.edges):
            return None
        return self.s0.edges[precedence]

    # Set the start state for a specific precedence value.
    #
    # @param precedence The current precedence.
    # @param startState The start state corresponding to the specified
    # precedence.
    #
    # @throws IllegalStateException if this is not a precedence DFA.
    # @see #isPrecedenceDfa()
    #
    def setPrecedenceStartState(self, precedence:int, startState:DFAState):
        if not self.precedenceDfa:
            raise IllegalStateException("Only precedence DFAs may contain a precedence start state.")

        if precedence < 0:
            return

        # synchronization on s0 here is ok. when the DFA is turned into a
        # precedence DFA, s0 will be initialized once and not updated again
        # s0.edges is never null for a precedence DFA
        if precedence >= len(self.s0.edges):
            # grow the edges table so index `precedence` exists
            ext = [None] * (precedence + 1 - len(self.s0.edges))
            self.s0.edges.extend(ext)
        self.s0.edges[precedence] = startState
    #
    # Sets whether this is a precedence DFA. If the specified value differs
    # from the current DFA configuration, the following actions are taken;
    # otherwise no changes are made to the current DFA.
    #
    # <ul>
    # <li>The {@link #states} map is cleared</li>
    # <li>If {@code precedenceDfa} is {@code false}, the initial state
    # {@link #s0} is set to {@code null}; otherwise, it is initialized to a new
    # {@link DFAState} with an empty outgoing {@link DFAState#edges} array to
    # store the start states for individual precedence values.</li>
    # <li>The {@link #precedenceDfa} field is updated</li>
    # </ul>
    #
    # @param precedenceDfa {@code true} if this is a precedence DFA; otherwise,
    # {@code false}

    def setPrecedenceDfa(self, precedenceDfa:bool):
        if self.precedenceDfa != precedenceDfa:
            self._states = dict()
            if precedenceDfa:
                precedenceState = DFAState(configs=ATNConfigSet())
                precedenceState.edges = []
                precedenceState.isAcceptState = False
                precedenceState.requiresFullContext = False
                self.s0 = precedenceState
            else:
                self.s0 = None
            self.precedenceDfa = precedenceDfa

    @property
    def states(self):
        # The backing dict of all DFA states, keyed by DFAState.
        return self._states

    # Return a list of all states in this DFA, ordered by state number.
    def sortedStates(self):
        return sorted(self._states.keys(), key=lambda state: state.stateNumber)

    def __str__(self):
        return self.toString(None)

    def toString(self, literalNames:list=None, symbolicNames:list=None):
        # Serialize using token display names when provided.
        if self.s0 is None:
            return ""
        from antlr4.dfa.DFASerializer import DFASerializer
        serializer = DFASerializer(self,literalNames,symbolicNames)
        return str(serializer)

    def toLexerString(self):
        # Serialize using character edge labels (for lexer DFAs).
        if self.s0 is None:
            return ""
        from antlr4.dfa.DFASerializer import LexerDFASerializer
        serializer = LexerDFASerializer(self)
        return str(serializer)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFASerializer.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # A DFA walker that knows how to dump them to serialized strings.#/
8
+ from io import StringIO
9
+ from antlr4 import DFA
10
+ from antlr4.Utils import str_list
11
+ from antlr4.dfa.DFAState import DFAState
12
+
13
+
14
class DFASerializer(object):
    """A DFA walker that knows how to dump a DFA to a serialized string."""
    __slots__ = ('dfa', 'literalNames', 'symbolicNames')

    def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
        self.dfa = dfa
        # Optional token display names, indexed by (token type - 1).
        self.literalNames = literalNames
        self.symbolicNames = symbolicNames

    def __str__(self):
        # Bug fix: __str__ must return a string. The previous code returned
        # None both for a missing s0 and for empty output, which made
        # str(serializer) raise TypeError; serialize those cases as "".
        if self.dfa.s0 is None:
            return ""
        with StringIO() as buf:
            for s in self.dfa.sortedStates():
                n = 0
                if s.edges is not None:
                    n = len(s.edges)
                for i in range(0, n):
                    t = s.edges[i]
                    # 0x7FFFFFFF marks the error sentinel state -- skip it.
                    if t is not None and t.stateNumber != 0x7FFFFFFF:
                        buf.write(self.getStateString(s))
                        label = self.getEdgeLabel(i)
                        buf.write("-")
                        buf.write(label)
                        buf.write("->")
                        buf.write(self.getStateString(t))
                        buf.write('\n')
            # getvalue() is "" when nothing was written.
            return buf.getvalue()

    def getEdgeLabel(self, i:int):
        """Display label for edge index i (0 is EOF; others are shifted by 1)."""
        if i==0:
            return "EOF"
        if self.literalNames is not None and i<=len(self.literalNames):
            return self.literalNames[i-1]
        elif self.symbolicNames is not None and i<=len(self.symbolicNames):
            return self.symbolicNames[i-1]
        else:
            return str(i-1)

    def getStateString(self, s:DFAState):
        """Render one DFA state, e.g. ":s3^=>2" for an accept state."""
        n = s.stateNumber
        baseStateStr = ( ":" if s.isAcceptState else "") + "s" + str(n) + ( "^" if s.requiresFullContext else "")
        if s.isAcceptState:
            if s.predicates is not None:
                return baseStateStr + "=>" + str_list(s.predicates)
            else:
                return baseStateStr + "=>" + str(s.prediction)
        else:
            return baseStateStr
66
+
67
class LexerDFASerializer(DFASerializer):
    """DFASerializer variant whose edge labels are literal characters."""

    def __init__(self, dfa:DFA):
        super().__init__(dfa, None)

    def getEdgeLabel(self, i:int):
        return f"'{chr(i)}'"
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/DFAState.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # Map a predicate to a predicted alternative.#/
8
+ from io import StringIO
9
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
10
+ from antlr4.atn.SemanticContext import SemanticContext
11
+
12
+
13
class PredPrediction(object):
    """Pairs a semantic predicate with the alternative it predicts."""
    __slots__ = ('alt', 'pred')

    def __init__(self, pred:SemanticContext, alt:int):
        self.pred = pred
        self.alt = alt

    def __str__(self):
        return f"({self.pred}, {self.alt})"
22
+
23
+ # A DFA state represents a set of possible ATN configurations.
24
+ # As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
25
+ # to keep track of all possible states the ATN can be in after
26
+ # reading each input symbol. That is to say, after reading
27
+ # input a1a2..an, the DFA is in a state that represents the
28
+ # subset T of the states of the ATN that are reachable from the
29
+ # ATN's start state along some path labeled a1a2..an."
30
+ # In conventional NFA&rarr;DFA conversion, therefore, the subset T
31
+ # would be a bitset representing the set of states the
32
+ # ATN could be in. We need to track the alt predicted by each
33
+ # state as well, however. More importantly, we need to maintain
34
+ # a stack of states, tracking the closure operations as they
35
+ # jump from rule to rule, emulating rule invocations (method calls).
36
+ # I have to add a stack to simulate the proper lookahead sequences for
37
+ # the underlying LL grammar from which the ATN was derived.
38
+ #
39
+ # <p>I use a set of ATNConfig objects not simple states. An ATNConfig
40
+ # is both a state (ala normal conversion) and a RuleContext describing
41
+ # the chain of rules (if any) followed to arrive at that state.</p>
42
+ #
43
+ # <p>A DFA state may have multiple references to a particular state,
44
+ # but with different ATN contexts (with same or different alts)
45
+ # meaning that state was reached via a different set of rule invocations.</p>
46
+ #/
47
class DFAState(object):
    """A DFA state representing a set of possible ATN configurations.

    Equality and hashing are defined purely by the ATN configuration set so
    that an already-existing equivalent state can be found in the DFA's
    state map; the state number is deliberately ignored.
    """
    __slots__ = (
        'stateNumber', 'configs', 'edges', 'isAcceptState', 'prediction',
        'lexerActionExecutor', 'requiresFullContext', 'predicates'
    )

    def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=None):
        self.stateNumber = stateNumber
        # Bug fix: the previous default `configs:ATNConfigSet=ATNConfigSet()`
        # was a mutable default argument -- one ATNConfigSet instance created
        # at definition time and shared by every DFAState constructed without
        # an explicit argument. Allocate a fresh, empty set per instance.
        self.configs = ATNConfigSet() if configs is None else configs
        # {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
        # {@link Token#EOF} maps to {@code edges[0]}.
        self.edges = None
        self.isAcceptState = False
        # if accept state, what ttype do we match or alt do we predict?
        # This is set to {@link ATN#INVALID_ALT_NUMBER} when {@link #predicates}{@code !=null} or
        # {@link #requiresFullContext}.
        self.prediction = 0
        self.lexerActionExecutor = None
        # Indicates that this state was created during SLL prediction that
        # discovered a conflict between the configurations in the state. Future
        # {@link ParserATNSimulator#execATN} invocations immediately jumped doing
        # full context prediction if this field is true.
        self.requiresFullContext = False
        # During SLL parsing, this is a list of predicates associated with the
        # ATN configurations of the DFA state. When we have predicates,
        # {@link #requiresFullContext} is {@code false} since full context prediction evaluates predicates
        # on-the-fly. If this is not null, then {@link #prediction} is
        # {@link ATN#INVALID_ALT_NUMBER}.
        #
        # <p>We only use these for non-{@link #requiresFullContext} but conflicting states. That
        # means we know from the context (it's $ or we don't dip into outer
        # context) that it's an ambiguity not a conflict.</p>
        #
        # <p>This list is computed by {@link ParserATNSimulator#predicateDFAState}.</p>
        self.predicates = None

    # Get the set of all alts mentioned by all ATN configurations in this
    # DFA state.
    def getAltSet(self):
        if self.configs is not None:
            # `or None` maps an empty set to None, preserving prior behavior.
            return set(cfg.alt for cfg in self.configs) or None
        return None

    def __hash__(self):
        return hash(self.configs)

    # Two {@link DFAState} instances are equal if their ATN configuration sets
    # are the same. This method is used to see if a state already exists.
    #
    # <p>Because the number of alternatives and number of ATN configurations are
    # finite, there is a finite number of DFA states that can be processed.
    # This is necessary to show that the algorithm terminates.</p>
    #
    # <p>Cannot test the DFA state numbers here because in
    # {@link ParserATNSimulator#addDFAState} we need to know if any other state
    # exists that has this exact set of ATN configurations. The
    # {@link #stateNumber} is irrelevant.</p>
    def __eq__(self, other):
        # compare set of ATN configurations in this set with other
        if self is other:
            return True
        elif not isinstance(other, DFAState):
            return False
        else:
            return self.configs==other.configs

    def __str__(self):
        with StringIO() as buf:
            buf.write(str(self.stateNumber))
            buf.write(":")
            buf.write(str(self.configs))
            if self.isAcceptState:
                buf.write("=>")
                if self.predicates is not None:
                    buf.write(str(self.predicates))
                else:
                    buf.write(str(self.prediction))
            return buf.getvalue()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/dfa/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __author__ = 'ericvergnaud'
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/DiagnosticErrorListener.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+
8
+ #
9
+ # This implementation of {@link ANTLRErrorListener} can be used to identify
10
+ # certain potential correctness and performance problems in grammars. "Reports"
11
+ # are made by calling {@link Parser#notifyErrorListeners} with the appropriate
12
+ # message.
13
+ #
14
+ # <ul>
15
+ # <li><b>Ambiguities</b>: These are cases where more than one path through the
16
+ # grammar can match the input.</li>
17
+ # <li><b>Weak context sensitivity</b>: These are cases where full-context
18
+ # prediction resolved an SLL conflict to a unique alternative which equaled the
19
+ # minimum alternative of the SLL conflict.</li>
20
+ # <li><b>Strong (forced) context sensitivity</b>: These are cases where the
21
+ # full-context prediction resolved an SLL conflict to a unique alternative,
22
+ # <em>and</em> the minimum alternative of the SLL conflict was found to not be
23
+ # a truly viable alternative. Two-stage parsing cannot be used for inputs where
24
+ # this situation occurs.</li>
25
+ # </ul>
26
+
27
+ from io import StringIO
28
+ from antlr4 import Parser, DFA
29
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
30
+ from antlr4.error.ErrorListener import ErrorListener
31
+
32
class DiagnosticErrorListener(ErrorListener):
    """Reports potential grammar correctness/performance problems.

    Each callback formats a diagnostic message and delivers it via
    Parser.notifyErrorListeners (see the module comment above for the
    categories reported).
    """

    def __init__(self, exactOnly:bool=True):
        # whether all ambiguities or only exact ambiguities are reported.
        self.exactOnly = exactOnly

    def reportAmbiguity(self, recognizer:Parser, dfa:DFA, startIndex:int,
                       stopIndex:int, exact:bool, ambigAlts:set, configs:ATNConfigSet):
        # Honor the exactOnly filter before formatting anything.
        if self.exactOnly and not exact:
            return

        with StringIO() as buf:
            buf.write("reportAmbiguity d=")
            buf.write(self.getDecisionDescription(recognizer, dfa))
            buf.write(": ambigAlts=")
            buf.write(str(self.getConflictingAlts(ambigAlts, configs)))
            buf.write(", input='")
            buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex))
            buf.write("'")
            recognizer.notifyErrorListeners(buf.getvalue())


    def reportAttemptingFullContext(self, recognizer:Parser, dfa:DFA, startIndex:int,
                       stopIndex:int, conflictingAlts:set, configs:ATNConfigSet):
        with StringIO() as buf:
            buf.write("reportAttemptingFullContext d=")
            buf.write(self.getDecisionDescription(recognizer, dfa))
            buf.write(", input='")
            buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex))
            buf.write("'")
            recognizer.notifyErrorListeners(buf.getvalue())

    def reportContextSensitivity(self, recognizer:Parser, dfa:DFA, startIndex:int,
                       stopIndex:int, prediction:int, configs:ATNConfigSet):
        with StringIO() as buf:
            buf.write("reportContextSensitivity d=")
            buf.write(self.getDecisionDescription(recognizer, dfa))
            buf.write(", input='")
            buf.write(recognizer.getTokenStream().getText(startIndex, stopIndex))
            buf.write("'")
            recognizer.notifyErrorListeners(buf.getvalue())

    def getDecisionDescription(self, recognizer:Parser, dfa:DFA):
        # Describe a decision as "<number> (<ruleName>)" when the rule name
        # is available, otherwise just the decision number.
        decision = dfa.decision
        ruleIndex = dfa.atnStartState.ruleIndex

        ruleNames = recognizer.ruleNames
        if ruleIndex < 0 or ruleIndex >= len(ruleNames):
            return str(decision)

        ruleName = ruleNames[ruleIndex]
        if ruleName is None or len(ruleName)==0:
            return str(decision)

        return str(decision) + " (" + ruleName + ")"

    #
    # Computes the set of conflicting or ambiguous alternatives from a
    # configuration set, if that information was not already provided by the
    # parser.
    #
    # @param reportedAlts The set of conflicting or ambiguous alternatives, as
    # reported by the parser.
    # @param configs The conflicting or ambiguous configuration set.
    # @return Returns {@code reportedAlts} if it is not {@code null}, otherwise
    # returns the set of alternatives represented in {@code configs}.
    #
    def getConflictingAlts(self, reportedAlts:set, configs:ATNConfigSet):
        if reportedAlts is not None:
            return reportedAlts

        result = set()
        for config in configs:
            result.add(config.alt)

        return result
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorListener.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+
6
+ # Provides an empty default implementation of {@link ANTLRErrorListener}. The
7
+ # default implementation of each method does nothing, but can be overridden as
8
+ # necessary.
9
+
10
+
11
+ import sys
12
+
13
class ErrorListener(object):
    """Empty default implementation of {@link ANTLRErrorListener}.

    Each method does nothing; subclasses override only the callbacks they
    care about.
    """

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        # Called when a syntax error is reported during lexing/parsing.
        pass

    def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
        # Called when an ambiguity between alternatives is reported.
        pass

    def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
        # Called when full-context prediction is attempted for a decision.
        pass

    def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
        # Called when a context sensitivity is reported for a decision.
        pass
26
+
27
class ConsoleErrorListener(ErrorListener):
    """Error listener that writes syntax errors to standard error."""

    # Provides a default instance of {@link ConsoleErrorListener};
    # assigned right after the class body below.
    INSTANCE = None

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        """Print messages of the form "line <line>:<column> <msg>" to stderr."""
        print(f"line {line}:{column} {msg}", file=sys.stderr)

ConsoleErrorListener.INSTANCE = ConsoleErrorListener()
49
+
50
class ProxyErrorListener(ErrorListener):
    """Fans every listener callback out to a collection of delegate listeners."""

    def __init__(self, delegates):
        super().__init__()
        if delegates is None:
            raise ReferenceError("delegates")
        self.delegates = delegates

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        for listener in self.delegates:
            listener.syntaxError(recognizer, offendingSymbol, line, column, msg, e)

    def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
        for listener in self.delegates:
            listener.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)

    def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
        for listener in self.delegates:
            listener.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)

    def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
        for listener in self.delegates:
            listener.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/ErrorStrategy.py ADDED
@@ -0,0 +1,709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+ import sys
7
+ from antlr4.IntervalSet import IntervalSet
8
+
9
+ from antlr4.Token import Token
10
+ from antlr4.atn.ATNState import ATNState
11
+ from antlr4.error.Errors import RecognitionException, NoViableAltException, InputMismatchException, \
12
+ FailedPredicateException, ParseCancellationException
13
+
14
+ # need forward declaration
15
+ Parser = None
16
+
17
class ErrorStrategy(object):
    """Interface for parser error-reporting and recovery strategies.

    Every hook is a no-op here; concrete strategies (for example
    DefaultErrorStrategy) override the methods they need.
    """

    def reset(self, recognizer:Parser):
        """Clear any state kept between parses."""

    def recoverInline(self, recognizer:Parser):
        """Attempt in-line recovery at a mismatched token; may return a token."""

    def recover(self, recognizer:Parser, e:RecognitionException):
        """Recover after a recognition exception has been reported."""

    def sync(self, recognizer:Parser):
        """Resynchronize the parser at decision points / loop iterations."""

    def inErrorRecoveryMode(self, recognizer:Parser):
        """Return whether the strategy is currently recovering from an error."""

    def reportError(self, recognizer:Parser, e:RecognitionException):
        """Report a recognition exception to the recognizer's listeners."""
36
+
37
+
38
+ # This is the default implementation of {@link ANTLRErrorStrategy} used for
39
+ # error reporting and recovery in ANTLR parsers.
40
+ #
41
# This is the default implementation of {@link ANTLRErrorStrategy} used for
# error reporting and recovery in ANTLR parsers.
#
class DefaultErrorStrategy(ErrorStrategy):
    """Default error-reporting/recovery strategy for generated parsers.

    Each syntax error is reported once (reports are suppressed while in
    error-recovery mode), and recovery proceeds by single-token deletion,
    single-token insertion, or consuming tokens until the combined follow
    set of the rule-invocation stack is reached.
    """

    def __init__(self):
        super().__init__()
        # True while recovering from a syntax error; suppresses cascades of
        # spurious reports until a token matches again (see inErrorRecoveryMode).
        self.errorRecoveryMode = False
        # Input index of the last error. Together with lastErrorStates this is
        # a failsafe guaranteeing at least one token is consumed between two
        # errors, preventing infinite recovery loops.
        self.lastErrorIndex = -1
        self.lastErrorStates = None
        # Context/ATN state captured by sync() when the expected set contains
        # EPSILON; bounds the cost of follow-set computation.
        self.nextTokensContext = None
        # FIX: sync() previously wrote two differently spelled attributes
        # (nextTokenState / nextTokensState) while __init__ defined only one.
        # nextTokensState is now the canonical attribute used throughout;
        # the historical nextTokenState is kept initialized for any external
        # readers.
        self.nextTokensState = ATNState.INVALID_STATE_NUMBER
        self.nextTokenState = 0

    def reset(self, recognizer:Parser):
        """Leave error-recovery mode (called when (re)starting a parse)."""
        self.endErrorCondition(recognizer)

    def beginErrorCondition(self, recognizer:Parser):
        """Enter error-recovery mode after a recognition exception is reported."""
        self.errorRecoveryMode = True

    def inErrorRecoveryMode(self, recognizer:Parser):
        return self.errorRecoveryMode

    def endErrorCondition(self, recognizer:Parser):
        """Leave error-recovery mode and clear the failsafe bookkeeping."""
        self.errorRecoveryMode = False
        self.lastErrorStates = None
        self.lastErrorIndex = -1

    def reportMatch(self, recognizer:Parser):
        """A token matched successfully; any ongoing recovery is complete."""
        self.endErrorCondition(recognizer)

    def reportError(self, recognizer:Parser, e:RecognitionException):
        """Report ``e`` unless already recovering, dispatching on its type.

        NoViableAltException, InputMismatchException and
        FailedPredicateException each get a specialized message; any other
        type is forwarded to the recognizer's error listeners as-is.
        """
        # If we've already reported an error and have not matched a token
        # yet successfully, don't report any errors.
        if self.inErrorRecoveryMode(recognizer):
            return  # don't report spurious errors
        self.beginErrorCondition(recognizer)
        if isinstance(e, NoViableAltException):
            self.reportNoViableAlternative(recognizer, e)
        elif isinstance(e, InputMismatchException):
            self.reportInputMismatch(recognizer, e)
        elif isinstance(e, FailedPredicateException):
            self.reportFailedPredicate(recognizer, e)
        else:
            # FIX: diagnostics go to stderr (was stdout), matching the rest
            # of the runtime's error output.
            print("unknown recognition error type: " + type(e).__name__, file=sys.stderr)
            recognizer.notifyErrorListeners(e.message, e.offendingToken, e)

    def recover(self, recognizer:Parser, e:RecognitionException):
        """Resynchronize by consuming tokens until one in the recovery set
        (loosely, the set of tokens that can follow the current rule) appears.
        """
        if self.lastErrorIndex==recognizer.getInputStream().index \
                and self.lastErrorStates is not None \
                and recognizer.state in self.lastErrorStates:
            # Another error at the same token index and a previously-visited
            # ATN state: LT(1) must be in the recovery set, so nothing was
            # consumed. Consume one token to guarantee progress (failsafe
            # against an infinite loop).
            recognizer.consume()

        self.lastErrorIndex = recognizer._input.index
        if self.lastErrorStates is None:
            self.lastErrorStates = []
        self.lastErrorStates.append(recognizer.state)
        followSet = self.getErrorRecoverySet(recognizer)
        self.consumeUntil(recognizer, followSet)

    def sync(self, recognizer:Parser):
        """Ensure the current lookahead is consistent with the ATN state.

        Implements Jim Idle's "magic sync": at sub-rule start states it
        attempts single-token deletion (raising InputMismatchException if
        that fails); at loop-back states it reports the unwanted token and
        consumes until something that can start an iteration or follow the
        loop. Override with an empty body to disable this behavior.
        """
        # If already recovering, don't try to sync
        if self.inErrorRecoveryMode(recognizer):
            return

        s = recognizer._interp.atn.states[recognizer.state]
        la = recognizer.getTokenStream().LA(1)
        # try cheaper subset first; might get lucky. seems to shave a wee bit off
        nextTokens = recognizer.atn.nextTokens(s)
        if la in nextTokens:
            self.nextTokensContext = None
            # FIX: was self.nextTokenState — unified on nextTokensState.
            self.nextTokensState = ATNState.INVALID_STATE_NUMBER
            return
        elif Token.EPSILON in nextTokens:
            if self.nextTokensContext is None:
                # It's possible the next token won't match; information
                # tracked by sync is restricted for performance.
                self.nextTokensContext = recognizer._ctx
                self.nextTokensState = recognizer._stateNumber
            return

        if s.stateType in [ATNState.BLOCK_START, ATNState.STAR_BLOCK_START,
                           ATNState.PLUS_BLOCK_START, ATNState.STAR_LOOP_ENTRY]:
            # report error and recover if possible
            if self.singleTokenDeletion(recognizer) is not None:
                return
            else:
                raise InputMismatchException(recognizer)

        elif s.stateType in [ATNState.PLUS_LOOP_BACK, ATNState.STAR_LOOP_BACK]:
            self.reportUnwantedToken(recognizer)
            expecting = recognizer.getExpectedTokens()
            whatFollowsLoopIterationOrRule = expecting.addSet(self.getErrorRecoverySet(recognizer))
            self.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)

        else:
            # do nothing if we can't identify the exact kind of ATN state
            pass

    def reportNoViableAlternative(self, recognizer:Parser, e:NoViableAltException):
        """Report a NoViableAltException, quoting the offending input span."""
        tokens = recognizer.getTokenStream()
        if tokens is not None:
            if e.startToken.type==Token.EOF:
                input = "<EOF>"
            else:
                input = tokens.getText(e.startToken, e.offendingToken)
        else:
            input = "<unknown input>"
        msg = "no viable alternative at input " + self.escapeWSAndQuote(input)
        recognizer.notifyErrorListeners(msg, e.offendingToken, e)

    def reportInputMismatch(self, recognizer:Parser, e:InputMismatchException):
        """Report an InputMismatchException with the expected-token set."""
        msg = "mismatched input " + self.getTokenErrorDisplay(e.offendingToken) \
              + " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames)
        recognizer.notifyErrorListeners(msg, e.offendingToken, e)

    def reportFailedPredicate(self, recognizer, e):
        """Report a FailedPredicateException, naming the current rule."""
        ruleName = recognizer.ruleNames[recognizer._ctx.getRuleIndex()]
        msg = "rule " + ruleName + " " + e.message
        recognizer.notifyErrorListeners(msg, e.offendingToken, e)

    def reportUnwantedToken(self, recognizer:Parser):
        """Report that the current LT(1) token is extraneous (will be deleted).

        No-op if already in error-recovery mode; otherwise enters it and
        notifies the error listeners.
        """
        if self.inErrorRecoveryMode(recognizer):
            return

        self.beginErrorCondition(recognizer)
        t = recognizer.getCurrentToken()
        tokenName = self.getTokenErrorDisplay(t)
        expecting = self.getExpectedTokens(recognizer)
        msg = "extraneous input " + tokenName + " expecting " \
            + expecting.toString(recognizer.literalNames, recognizer.symbolicNames)
        recognizer.notifyErrorListeners(msg, t, None)

    def reportMissingToken(self, recognizer:Parser):
        """Report that a token is missing (will be conjured up).

        No-op if already in error-recovery mode; otherwise enters it and
        notifies the error listeners.
        """
        if self.inErrorRecoveryMode(recognizer):
            return
        self.beginErrorCondition(recognizer)
        t = recognizer.getCurrentToken()
        expecting = self.getExpectedTokens(recognizer)
        msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) \
              + " at " + self.getTokenErrorDisplay(t)
        recognizer.notifyErrorListeners(msg, t, None)

    def recoverInline(self, recognizer:Parser):
        """Recover from mismatched input via single-token deletion or
        single-token insertion; raise InputMismatchException if neither works.

        Deletion: if LA(2) is the token we want, report LA(1) as extraneous,
        consume it, and return the now-current token. Insertion: if LA(1)
        could follow the expected token, report it missing and return a
        conjured token of the expected type.
        """
        # SINGLE TOKEN DELETION
        matchedSymbol = self.singleTokenDeletion(recognizer)
        if matchedSymbol is not None:
            # we have deleted the extra token.
            # now, move past ttype token as if all were ok
            recognizer.consume()
            return matchedSymbol

        # SINGLE TOKEN INSERTION
        if self.singleTokenInsertion(recognizer):
            return self.getMissingSymbol(recognizer)

        # even that didn't work; must throw the exception
        raise InputMismatchException(recognizer)

    def singleTokenInsertion(self, recognizer:Parser):
        """Return True (and report a missing token) if LA(1) would match as
        LA(2), i.e. inserting one token would repair the input."""
        currentSymbolType = recognizer.getTokenStream().LA(1)
        # if current token is consistent with what could come after current
        # ATN state, then we know we're missing a token; error recovery
        # is free to conjure up and insert the missing token
        atn = recognizer._interp.atn
        currentState = atn.states[recognizer.state]
        # FIX: renamed local 'next' -> 'nextState' to stop shadowing builtin.
        nextState = currentState.transitions[0].target
        expectingAtLL2 = atn.nextTokens(nextState, recognizer._ctx)
        if currentSymbolType in expectingAtLL2:
            self.reportMissingToken(recognizer)
            return True
        else:
            return False

    def singleTokenDeletion(self, recognizer:Parser):
        """If deleting LA(1) would repair the input (LA(2) is expected),
        report the unwanted token, consume it, and return the matched token;
        otherwise return None and leave all state untouched."""
        nextTokenType = recognizer.getTokenStream().LA(2)
        expecting = self.getExpectedTokens(recognizer)
        if nextTokenType in expecting:
            self.reportUnwantedToken(recognizer)
            recognizer.consume()  # simply delete extra token
            # we want to return the token we're actually matching
            matchedSymbol = recognizer.getCurrentToken()
            self.reportMatch(recognizer)  # we know current token is correct
            return matchedSymbol
        else:
            return None

    def getMissingSymbol(self, recognizer:Parser):
        """Conjure up a token of the expected type so actions that reference
        the (missing) matched symbol keep working. Text is "<missing X>"; the
        position is borrowed from the current (or previous, at EOF) token."""
        currentSymbol = recognizer.getCurrentToken()
        expecting = self.getExpectedTokens(recognizer)
        expectedTokenType = expecting[0]  # get any element
        if expectedTokenType==Token.EOF:
            tokenText = "<missing EOF>"
        else:
            name = None
            if expectedTokenType < len(recognizer.literalNames):
                name = recognizer.literalNames[expectedTokenType]
            if name is None and expectedTokenType < len(recognizer.symbolicNames):
                name = recognizer.symbolicNames[expectedTokenType]
            tokenText = "<missing " + str(name) + ">"
        current = currentSymbol
        lookback = recognizer.getTokenStream().LT(-1)
        if current.type==Token.EOF and lookback is not None:
            current = lookback
        return recognizer.getTokenFactory().create(current.source,
            expectedTokenType, tokenText, Token.DEFAULT_CHANNEL,
            -1, -1, current.line, current.column)

    def getExpectedTokens(self, recognizer:Parser):
        return recognizer.getExpectedTokens()

    def getTokenErrorDisplay(self, t:Token):
        """Render a token for error messages: its text, or "<EOF>"/"<type>"
        when no text is available, whitespace-escaped and quoted."""
        if t is None:
            return "<no token>"
        s = t.text
        if s is None:
            if t.type==Token.EOF:
                s = "<EOF>"
            else:
                s = "<" + str(t.type) + ">"
        return self.escapeWSAndQuote(s)

    def escapeWSAndQuote(self, s:str):
        """Escape newlines/returns/tabs and wrap in single quotes."""
        s = s.replace("\n","\\n")
        s = s.replace("\r","\\r")
        s = s.replace("\t","\\t")
        return "'" + s + "'"

    def getErrorRecoverySet(self, recognizer:Parser):
        """Compute the union of context-sensitive FOLLOW sets for the whole
        rule-invocation stack (Grosch-style combined follow), minus EPSILON.

        Consuming until a token in this set lets each enclosing rule either
        resume or exit cleanly instead of swallowing input to EOF.
        """
        atn = recognizer._interp.atn
        ctx = recognizer._ctx
        recoverSet = IntervalSet()
        while ctx is not None and ctx.invokingState>=0:
            # compute what follows who invoked us
            invokingState = atn.states[ctx.invokingState]
            rt = invokingState.transitions[0]
            follow = atn.nextTokens(rt.followState)
            recoverSet.addSet(follow)
            ctx = ctx.parentCtx
        recoverSet.removeOne(Token.EPSILON)
        return recoverSet

    def consumeUntil(self, recognizer:Parser, set_:set):
        """Consume tokens until one matches the given token set (or EOF)."""
        ttype = recognizer.getTokenStream().LA(1)
        while ttype != Token.EOF and ttype not in set_:
            recognizer.consume()
            ttype = recognizer.getTokenStream().LA(1)
656
+
657
+
658
+ #
659
+ # This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
660
+ # by immediately canceling the parse operation with a
661
+ # {@link ParseCancellationException}. The implementation ensures that the
662
+ # {@link ParserRuleContext#exception} field is set for all parse tree nodes
663
+ # that were not completed prior to encountering the error.
664
+ #
665
+ # <p>
666
+ # This error strategy is useful in the following scenarios.</p>
667
+ #
668
+ # <ul>
669
+ # <li><strong>Two-stage parsing:</strong> This error strategy allows the first
670
+ # stage of two-stage parsing to immediately terminate if an error is
671
+ # encountered, and immediately fall back to the second stage. In addition to
672
+ # avoiding wasted work by attempting to recover from errors here, the empty
673
+ # implementation of {@link BailErrorStrategy#sync} improves the performance of
674
+ # the first stage.</li>
675
+ # <li><strong>Silent validation:</strong> When syntax errors are not being
676
+ # reported or logged, and the parse result is simply ignored if errors occur,
677
+ # the {@link BailErrorStrategy} avoids wasting work on recovering from errors
678
+ # when the result will be ignored either way.</li>
679
+ # </ul>
680
+ #
681
+ # <p>
682
+ # {@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
683
+ #
684
+ # @see Parser#setErrorHandler(ANTLRErrorStrategy)
685
+ #
686
class BailErrorStrategy(DefaultErrorStrategy):
    """Strategy that cancels the parse on the first syntax error.

    Instead of recovering, every error is re-thrown wrapped in a
    ParseCancellationException (use its cause to retrieve the original
    RecognitionException). The exception field of every context on the
    invocation stack is set first, so unfinished parse-tree nodes record
    the failure. Useful for two-stage parsing and silent validation.
    """

    def recover(self, recognizer:Parser, e:RecognitionException):
        # Mark every open rule context with the exception, then bail out.
        ctx = recognizer._ctx
        while ctx is not None:
            ctx.exception = e
            ctx = ctx.parentCtx
        raise ParseCancellationException(e)

    def recoverInline(self, recognizer:Parser):
        # Never attempt in-line recovery; escalate immediately.
        self.recover(recognizer, InputMismatchException(recognizer))

    def sync(self, recognizer:Parser):
        # Never attempt to recover from problems in subrules.
        pass
708
+
709
+ del Parser
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/Errors.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #
5
+
6
# Forward declarations: these names are referenced only in the type
# annotations below; binding them here (as None) avoids circular imports.
# They are removed again with `del` at the bottom of the module.
Token = None
Lexer = None
Parser = None
TokenStream = None
ATNConfigSet = None
# NOTE(review): "ParserRulecontext" (lower-case c) looks like a typo for
# ParserRuleContext, but it is used consistently as an annotation in this
# module, so it is kept as-is.
ParserRulecontext = None
PredicateTransition = None
BufferedTokenStream = None
15
+
16
class UnsupportedOperationException(Exception):
    """Raised when a requested operation is not supported (Java parity)."""

    def __init__(self, msg:str):
        super().__init__(msg)


class IllegalStateException(Exception):
    """Raised when an object is in an inappropriate state for a call (Java parity)."""

    def __init__(self, msg:str):
        super().__init__(msg)


class CancellationException(IllegalStateException):
    """Raised when an operation has been cancelled."""

    def __init__(self, msg:str):
        super().__init__(msg)
30
+
31
+ # The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
32
+ # 3 kinds of errors: prediction errors, failed predicate errors, and
33
+ # mismatched input errors. In each case, the parser knows where it is
34
+ # in the input, where it is in the ATN, the rule invocation stack,
35
+ # and what kind of problem occurred.
36
+
37
+ from antlr4.InputStream import InputStream
38
+ from antlr4.ParserRuleContext import ParserRuleContext
39
+ from antlr4.Recognizer import Recognizer
40
+
41
class RecognitionException(Exception):
    """Root of the ANTLR exception hierarchy.

    ANTLR tracks three kinds of errors: prediction errors, failed
    predicate errors, and mismatched input errors. In each case the
    parser knows where it is in the input, where it is in the ATN, the
    rule invocation stack, and what kind of problem occurred.
    """

    def __init__(self, message:str=None, recognizer:Recognizer=None, input:InputStream=None, ctx:ParserRulecontext=None):
        super().__init__(message)
        self.message = message
        self.recognizer = recognizer
        self.input = input
        self.ctx = ctx
        # The current Token when the error occurred. Not every stream
        # supports retrieving symbols by index, so keep the instance itself.
        self.offendingToken = None
        # ATN state number the parser was in when the error occurred; for
        # NoViableAltException / LexerNoViableAltException it is the
        # DecisionState number, otherwise the state whose outgoing edge we
        # couldn't match. -1 when the state number is not known.
        self.offendingState = recognizer.state if recognizer is not None else -1

    def getExpectedTokens(self):
        """Return the set of input symbols which could potentially follow
        the previously matched symbol at the time this exception was
        thrown, or None if that information is not available."""
        if self.recognizer is None:
            return None
        return self.recognizer.atn.getExpectedTokens(self.offendingState, self.ctx)
80
+
81
+
82
class LexerNoViableAltException(RecognitionException):
    """Raised by the lexer when no alternative can match the input."""

    def __init__(self, lexer:Lexer, input:InputStream, startIndex:int, deadEndConfigs:ATNConfigSet):
        super().__init__(message=None, recognizer=lexer, input=input, ctx=None)
        self.startIndex = startIndex
        self.deadEndConfigs = deadEndConfigs

    def __str__(self):
        symbol = ""
        if 0 <= self.startIndex < self.input.size:
            symbol = self.input.getText(self.startIndex, self.startIndex)
            # TODO symbol = Utils.escapeWhitespace(symbol, false);
        return "LexerNoViableAltException('" + symbol + "')"
95
+
96
+ # Indicates that the parser could not decide which of two or more paths
97
+ # to take based upon the remaining input. It tracks the starting token
98
+ # of the offending input and also knows where the parser was
99
+ # in the various paths when the error. Reported by reportNoViableAlternative()
100
+ #
101
class NoViableAltException(RecognitionException):
    """Indicates that the parser could not decide which of two or more
    paths to take based upon the remaining input. Tracks the starting
    token of the offending input and the dead-end configurations.
    Reported by reportNoViableAlternative()."""

    def __init__(self, recognizer:Parser, input:TokenStream=None, startToken:Token=None,
                 offendingToken:Token=None, deadEndConfigs:ATNConfigSet=None, ctx:ParserRuleContext=None):
        # Default every argument left as None from the recognizer's current state.
        ctx = recognizer._ctx if ctx is None else ctx
        offendingToken = recognizer.getCurrentToken() if offendingToken is None else offendingToken
        startToken = recognizer.getCurrentToken() if startToken is None else startToken
        input = recognizer.getInputStream() if input is None else input
        super().__init__(recognizer=recognizer, input=input, ctx=ctx)
        # Which configurations did we try at input.index() that couldn't match input.LT(1)?
        self.deadEndConfigs = deadEndConfigs
        # The token object at the start index; kept directly because the
        # input stream might not be buffering tokens later on.
        self.startToken = startToken
        self.offendingToken = offendingToken
122
+
123
+ # This signifies any kind of mismatched input exceptions such as
124
+ # when the current input does not match the expected token.
125
+ #
126
class InputMismatchException(RecognitionException):
    """Signifies any kind of mismatched input exception, such as when the
    current input does not match the expected token."""

    def __init__(self, recognizer:Parser):
        super().__init__(recognizer=recognizer,
                         input=recognizer.getInputStream(),
                         ctx=recognizer._ctx)
        self.offendingToken = recognizer.getCurrentToken()
131
+
132
+
133
+ # A semantic predicate failed during validation. Validation of predicates
134
+ # occurs when normally parsing the alternative just like matching a token.
135
+ # Disambiguating predicate evaluation occurs when we test a predicate during
136
+ # prediction.
137
+
138
class FailedPredicateException(RecognitionException):
    """A semantic predicate failed during validation.

    Validation of predicates occurs when normally parsing the alternative
    just like matching a token; disambiguating predicate evaluation occurs
    when a predicate is tested during prediction.
    """

    def __init__(self, recognizer:Parser, predicate:str=None, message:str=None):
        super().__init__(message=self.formatMessage(predicate, message), recognizer=recognizer,
                         input=recognizer.getInputStream(), ctx=recognizer._ctx)
        # Recover rule/predicate indexes from the transition at the current state.
        from antlr4.atn.Transition import PredicateTransition
        s = recognizer._interp.atn.states[recognizer.state]
        trans = s.transitions[0]
        if isinstance(trans, PredicateTransition):
            self.ruleIndex = trans.ruleIndex
            self.predicateIndex = trans.predIndex
        else:
            self.ruleIndex = 0
            self.predicateIndex = 0
        self.predicate = predicate
        self.offendingToken = recognizer.getCurrentToken()

    def formatMessage(self, predicate:str, message:str):
        """Return ``message`` when supplied, else a default built from the predicate."""
        return message if message is not None else "failed predicate: {" + predicate + "}?"
160
+
161
class ParseCancellationException(CancellationException):
    """Signals that a parse was cancelled; the constructor argument (the
    wrapping cause) is stored in ``args`` by the Exception machinery."""
    pass
164
+
165
# Remove the forward-declaration placeholders (still bound to None) so
# they do not leak from this module's namespace.
del Token
del Lexer
del Parser
del TokenStream
del ATNConfigSet
del ParserRulecontext
del PredicateTransition
del BufferedTokenStream
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/error/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
# antlr4.error package initializer; records the original author.
__author__ = 'ericvergnaud'
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Chunk.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
class Chunk(object):
    """Abstract base class for the pieces a tree pattern is split into."""
    pass


class TagChunk(Chunk):
    """A tag chunk such as ``<ID>`` or ``<e:expr>``.

    ``tag`` is the rule or token name; ``label`` is the optional label
    written before the colon, or None when the tag is unlabeled.
    """
    __slots__ = ('tag', 'label')

    def __init__(self, tag:str, label:str=None):
        self.tag = tag
        self.label = label

    def __str__(self):
        return self.tag if self.label is None else self.label + ":" + self.tag


class TextChunk(Chunk):
    """A chunk of plain pattern text between tags."""
    __slots__ = 'text'

    def __init__(self, text:str):
        self.text = text

    def __str__(self):
        return "'" + self.text + "'"
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreeMatch.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+
8
+ #
9
+ # Represents the result of matching a {@link ParseTree} against a tree pattern.
10
+ #
11
+ from io import StringIO
12
+ from antlr4.tree.ParseTreePattern import ParseTreePattern
13
+ from antlr4.tree.Tree import ParseTree
14
+
15
+
16
class ParseTreeMatch(object):
    """Represents the result of matching a ParseTree against a tree pattern."""
    __slots__ = ('tree', 'pattern', 'labels', 'mismatchedNode')

    def __init__(self, tree:ParseTree, pattern:ParseTreePattern, labels:dict, mismatchedNode:ParseTree):
        """Construct a match result.

        tree -- the parse tree matched against the pattern (required)
        pattern -- the parse tree pattern (required)
        labels -- mapping from label names to lists of ParseTree nodes
                  located by the matching process (required)
        mismatchedNode -- the first node which failed to match, or None
                          when the match succeeded
        """
        if tree is None:
            raise Exception("tree cannot be null")
        if pattern is None:
            raise Exception("pattern cannot be null")
        if labels is None:
            raise Exception("labels cannot be null")
        self.tree = tree
        self.pattern = pattern
        self.labels = labels
        self.mismatchedNode = mismatchedNode

    def get(self, label:str):
        """Return the last node associated with ``label``, or None when no
        node matched a tag with that label. Unlabeled tags like ``<ID>``
        are considered labeled with their own name."""
        nodes = self.labels.get(label, None)
        return nodes[-1] if nodes else None

    def getAll(self, label:str):
        """Return all nodes matching a rule or token tag with ``label``;
        an empty list when nothing matched."""
        nodes = self.labels.get(label, None)
        return list() if nodes is None else nodes

    def succeeded(self):
        """Return True if the match operation succeeded (no mismatched node)."""
        return self.mismatchedNode is None

    def __str__(self):
        outcome = "succeeded" if self.succeeded() else "failed"
        return "Match " + outcome + "; found " + str(len(self.labels)) + " labels"
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePattern.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A pattern like {@code <ID> = <expr>;} converted to a {@link ParseTree} by
9
+ # {@link ParseTreePatternMatcher#compile(String, int)}.
10
+ #
11
+ from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
12
+ from antlr4.tree.Tree import ParseTree
13
+ from antlr4.xpath.XPath import XPath
14
+
15
+
16
class ParseTreePattern(object):
    """A compiled tree pattern such as ``<ID> = <expr>;`` produced by
    ParseTreePatternMatcher.compile()."""
    __slots__ = ('matcher', 'patternRuleIndex', 'pattern', 'patternTree')

    def __init__(self, matcher:ParseTreePatternMatcher, pattern:str, patternRuleIndex:int, patternTree:ParseTree):
        """Store the matcher which created this pattern, the pattern's
        concrete syntax, the parser rule serving as its root, and its
        ParseTree form."""
        self.matcher = matcher
        self.patternRuleIndex = patternRuleIndex
        self.pattern = pattern
        self.patternTree = patternTree

    def match(self, tree:ParseTree):
        """Match ``tree`` against this pattern and return a ParseTreeMatch
        describing the result."""
        return self.matcher.match(tree, self)

    def matches(self, tree:ParseTree):
        """Return True when ``tree`` matches this pattern."""
        return self.matcher.match(tree, self).succeeded()

    def findAll(self, tree:ParseTree, xpath:str):
        """Find all nodes of ``tree`` selected by ``xpath`` and return the
        ParseTreeMatch for each subtree that matches this pattern;
        unsuccessful matches are omitted."""
        subtrees = XPath.findAll(tree, xpath, self.matcher.parser)
        return [m for m in (self.match(t) for t in subtrees) if m.succeeded()]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/ParseTreePatternMatcher.py ADDED
@@ -0,0 +1,374 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A tree pattern matching mechanism for ANTLR {@link ParseTree}s.
9
+ #
10
+ # <p>Patterns are strings of source input text with special tags representing
11
+ # token or rule references such as:</p>
12
+ #
13
+ # <p>{@code <ID> = <expr>;}</p>
14
+ #
15
+ # <p>Given a pattern start rule such as {@code statement}, this object constructs
16
+ # a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
17
+ # subtree. Then the {@link #match} routines can compare an actual
18
+ # {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches
19
+ # any {@code ID} token and tag {@code <expr>} references the result of the
20
+ # {@code expr} rule (generally an instance of {@code ExprContext}.</p>
21
+ #
22
+ # <p>Pattern {@code x = 0;} is a similar pattern that matches the same pattern
23
+ # except that it requires the identifier to be {@code x} and the expression to
24
+ # be {@code 0}.</p>
25
+ #
26
+ # <p>The {@link #matches} routines return {@code true} or {@code false} based
27
+ # upon a match for the tree rooted at the parameter sent in. The
28
+ # {@link #match} routines return a {@link ParseTreeMatch} object that
29
+ # contains the parse tree, the parse tree pattern, and a map from tag name to
30
+ # matched nodes (more below). A subtree that fails to match, returns with
31
+ # {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not
32
+ # match.</p>
33
+ #
34
+ # <p>For efficiency, you can compile a tree pattern in string form to a
35
+ # {@link ParseTreePattern} object.</p>
36
+ #
37
+ # <p>See {@code TestParseTreeMatcher} for lots of examples.
38
+ # {@link ParseTreePattern} has two static helper methods:
39
+ # {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that
40
+ # are easy to use but not super efficient because they create new
41
+ # {@link ParseTreePatternMatcher} objects each time and have to compile the
42
+ # pattern in string form before using it.</p>
43
+ #
44
+ # <p>The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
45
+ # constructor are used to parse the pattern in string form. The lexer converts
46
+ # the {@code <ID> = <expr>;} into a sequence of four tokens (assuming lexer
47
+ # throws out whitespace or puts it on a hidden channel). Be aware that the
48
+ # input stream is reset for the lexer (but not the parser; a
49
+ # {@link ParserInterpreter} is created to parse the input.). Any user-defined
50
+ # fields you have put into the lexer might get changed when this mechanism asks
51
+ # it to scan the pattern string.</p>
52
+ #
53
+ # <p>Normally a parser does not accept token {@code <expr>} as a valid
54
+ # {@code expr} but, from the parser passed in, we create a special version of
55
+ # the underlying grammar representation (an {@link ATN}) that allows imaginary
56
+ # tokens representing rules ({@code <expr>}) to match entire rules. We call
57
+ # these <em>bypass alternatives</em>.</p>
58
+ #
59
+ # <p>Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
60
+ # by default, but you can set them to whatever you want using
61
+ # {@link #setDelimiters}. You must escape both start and stop strings
62
+ # {@code \<} and {@code \>}.</p>
63
+ #
64
+ from antlr4.CommonTokenStream import CommonTokenStream
65
+ from antlr4.InputStream import InputStream
66
+ from antlr4.ParserRuleContext import ParserRuleContext
67
+ from antlr4.Lexer import Lexer
68
+ from antlr4.ListTokenSource import ListTokenSource
69
+ from antlr4.Token import Token
70
+ from antlr4.error.ErrorStrategy import BailErrorStrategy
71
+ from antlr4.error.Errors import RecognitionException, ParseCancellationException
72
+ from antlr4.tree.Chunk import TagChunk, TextChunk
73
+ from antlr4.tree.RuleTagToken import RuleTagToken
74
+ from antlr4.tree.TokenTagToken import TokenTagToken
75
+ from antlr4.tree.Tree import ParseTree, TerminalNode, RuleNode
76
+
77
# Forward declarations: these names appear only in type annotations in
# this module; the real classes are imported lazily inside methods to
# avoid circular imports.
Parser = None
ParseTreePattern = None
80
+
81
class CannotInvokeStartRule(Exception):
    """Raised when the pattern's start rule cannot be invoked; the
    underlying exception is passed to the constructor and therefore kept
    in ``args``."""

    def __init__(self, e:Exception):
        super().__init__(e)
85
+
86
class StartRuleDoesNotConsumeFullPattern(Exception):
    """Raised when tokens remain in the pattern after the start rule
    finished parsing it."""
    pass
89
+
90
+
91
class ParseTreePatternMatcher(object):
    """A tree pattern matching mechanism for ANTLR ParseTrees.

    Patterns are strings of source input text with special tags
    representing token or rule references, e.g. ``<ID> = <expr>;``. The
    pattern is tokenized into text and tag chunks, compiled into a
    ParseTree via a ParserInterpreter, and then compared structurally
    against candidate trees. Delimiters default to ``<`` and ``>`` with
    ``\\`` as the escape; see setDelimiters().
    """
    __slots__ = ('lexer', 'parser', 'start', 'stop', 'escape')

    def __init__(self, lexer:Lexer, parser:Parser):
        """Construct a matcher from a Lexer and Parser. The lexer's input
        stream is altered for tokenizing the tree patterns; the parser is
        used as a convenient mechanism to get the grammar name plus token
        and rule names."""
        self.lexer = lexer
        self.parser = parser
        self.start = "<"
        self.stop = ">"
        self.escape = "\\" # e.g., \< and \> must escape BOTH!

    def setDelimiters(self, start:str, stop:str, escapeLeft:str):
        """Set the delimiters used for marking rule and token tags within
        concrete pattern syntax.

        Raises Exception if start or stop is None or empty."""
        if start is None or len(start)==0:
            raise Exception("start cannot be null or empty")
        if stop is None or len(stop)==0:
            raise Exception("stop cannot be null or empty")
        self.start = start
        self.stop = stop
        self.escape = escapeLeft

    def matchesRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int):
        """Does ``pattern`` parsed as rule ``patternRuleIndex`` match ``tree``?"""
        p = self.compileTreePattern(pattern, patternRuleIndex)
        # BUGFIX: was self.matches(tree, p) -- no method named `matches`
        # exists on this class; the compiled-pattern overload is
        # matchesPattern().
        return self.matchesPattern(tree, p)

    def matchesPattern(self, tree:ParseTree, pattern:ParseTreePattern):
        """Does the compiled ``pattern`` match ``tree``?"""
        mismatchedNode = self.matchImpl(tree, pattern.patternTree, dict())
        return mismatchedNode is None

    def matchRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int):
        """Compile ``pattern`` as rule ``patternRuleIndex`` and return the
        ParseTreeMatch for ``tree``."""
        p = self.compileTreePattern(pattern, patternRuleIndex)
        return self.matchPattern(tree, p)

    def matchPattern(self, tree:ParseTree, pattern:ParseTreePattern):
        """Compare compiled ``pattern`` against ``tree`` and return a
        ParseTreeMatch containing the matched elements, or the node at
        which the match failed."""
        labels = dict()
        mismatchedNode = self.matchImpl(tree, pattern.patternTree, labels)
        from antlr4.tree.ParseTreeMatch import ParseTreeMatch
        return ParseTreeMatch(tree, pattern, labels, mismatchedNode)

    def compileTreePattern(self, pattern:str, patternRuleIndex:int):
        """For repeated use of a tree pattern, compile it to a ParseTreePattern.

        Raises CannotInvokeStartRule when the interpreter cannot start, and
        StartRuleDoesNotConsumeFullPattern when tokens remain after parsing.
        """
        tokenList = self.tokenize(pattern)
        tokenSrc = ListTokenSource(tokenList)
        tokens = CommonTokenStream(tokenSrc)
        from antlr4.ParserInterpreter import ParserInterpreter
        parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames,
                                self.parser.ruleNames, self.parser.getATNWithBypassAlts(), tokens)
        tree = None
        try:
            parserInterp.setErrorHandler(BailErrorStrategy())
            tree = parserInterp.parse(patternRuleIndex)
        except ParseCancellationException as e:
            # BUGFIX: was "raise e.cause" -- Python exceptions have no
            # `.cause` attribute (that is a Java-ism); BailErrorStrategy
            # wraps the RecognitionException as the constructor argument,
            # which Exception stores as args[0].
            raise e.args[0]
        except RecognitionException as e:
            raise e
        except Exception as e:
            raise CannotInvokeStartRule(e)

        # Make sure tree pattern compilation checks for a complete parse
        if tokens.LA(1) != Token.EOF:
            raise StartRuleDoesNotConsumeFullPattern()

        from antlr4.tree.ParseTreePattern import ParseTreePattern
        return ParseTreePattern(self, pattern, patternRuleIndex, tree)

    def matchImpl(self, tree:ParseTree, patternTree:ParseTree, labels:dict):
        """Recursively walk ``tree`` against ``patternTree``, filling ``labels``.

        Returns the first node encountered in ``tree`` which does not match
        a corresponding node in ``patternTree``, or None when the match
        succeeded."""
        if tree is None:
            raise Exception("tree cannot be null")
        if patternTree is None:
            raise Exception("patternTree cannot be null")

        # x and <ID>, x and y, or x and x; or could be mismatched types
        if isinstance(tree, TerminalNode) and isinstance(patternTree, TerminalNode):
            mismatchedNode = None
            # both are tokens and they have same type
            if tree.symbol.type == patternTree.symbol.type:
                if isinstance(patternTree.symbol, TokenTagToken): # x and <ID>
                    tokenTagToken = patternTree.symbol
                    # track label->list-of-nodes for both token name and label (if any)
                    self.map(labels, tokenTagToken.tokenName, tree)
                    if tokenTagToken.label is not None:
                        self.map(labels, tokenTagToken.label, tree)
                elif tree.getText() == patternTree.getText():
                    # x and x
                    pass
                else:
                    # x and y
                    if mismatchedNode is None:
                        mismatchedNode = tree
            else:
                if mismatchedNode is None:
                    mismatchedNode = tree
            return mismatchedNode

        if isinstance(tree, ParserRuleContext) and isinstance(patternTree, ParserRuleContext):
            mismatchedNode = None
            # (expr ...) and <expr>
            ruleTagToken = self.getRuleTagToken(patternTree)
            if ruleTagToken is not None:
                if tree.ruleContext.ruleIndex == patternTree.ruleContext.ruleIndex:
                    # track label->list-of-nodes for both rule name and label (if any)
                    self.map(labels, ruleTagToken.ruleName, tree)
                    if ruleTagToken.label is not None:
                        self.map(labels, ruleTagToken.label, tree)
                else:
                    if mismatchedNode is None:
                        mismatchedNode = tree
                return mismatchedNode

            # (expr ...) and (expr ...)
            if tree.getChildCount() != patternTree.getChildCount():
                if mismatchedNode is None:
                    mismatchedNode = tree
                return mismatchedNode

            n = tree.getChildCount()
            for i in range(0, n):
                childMatch = self.matchImpl(tree.getChild(i), patternTree.getChild(i), labels)
                if childMatch is not None:
                    return childMatch
            return mismatchedNode

        # if nodes aren't both tokens or both rule nodes, can't match
        return tree

    def map(self, labels, label, tree):
        """Append ``tree`` to the list of nodes stored under ``label``."""
        v = labels.get(label, None)
        if v is None:
            v = list()
            labels[label] = v
        v.append(tree)

    def getRuleTagToken(self, tree:ParseTree):
        """Return the RuleTagToken when ``tree`` is an ``(expr <expr>)``
        subtree, else None."""
        if isinstance(tree, RuleNode):
            if tree.getChildCount()==1 and isinstance(tree.getChild(0), TerminalNode):
                c = tree.getChild(0)
                if isinstance(c.symbol, RuleTagToken):
                    return c.symbol
        return None

    def tokenize(self, pattern:str):
        """Turn ``pattern`` into a list of tokens: tag chunks become
        TokenTagToken/RuleTagToken instances, text chunks are run through
        the grammar's lexer."""
        # split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
        chunks = self.split(pattern)

        # create token stream from text and tags
        tokens = list()
        for chunk in chunks:
            if isinstance(chunk, TagChunk):
                # add special rule token or conjure up new token from name
                if chunk.tag[0].isupper():
                    ttype = self.parser.getTokenType(chunk.tag)
                    if ttype==Token.INVALID_TYPE:
                        raise Exception("Unknown token " + str(chunk.tag) + " in pattern: " + pattern)
                    tokens.append(TokenTagToken(chunk.tag, ttype, chunk.label))
                elif chunk.tag[0].islower():
                    ruleIndex = self.parser.getRuleIndex(chunk.tag)
                    if ruleIndex==-1:
                        raise Exception("Unknown rule " + str(chunk.tag) + " in pattern: " + pattern)
                    ruleImaginaryTokenType = self.parser.getATNWithBypassAlts().ruleToTokenType[ruleIndex]
                    tokens.append(RuleTagToken(chunk.tag, ruleImaginaryTokenType, chunk.label))
                else:
                    raise Exception("invalid tag: " + str(chunk.tag) + " in pattern: " + pattern)
            else:
                self.lexer.setInputStream(InputStream(chunk.text))
                t = self.lexer.nextToken()
                while t.type != Token.EOF:
                    tokens.append(t)
                    t = self.lexer.nextToken()
        return tokens

    def split(self, pattern:str):
        """Split ``<ID> = <e:expr> ;`` into 4 chunks for tokenizing by tokenize()."""
        p = 0
        n = len(pattern)
        chunks = list()
        # find all start and stop indexes first, then collect
        starts = list()
        stops = list()
        while p < n:
            if p == pattern.find(self.escape + self.start, p):
                p += len(self.escape) + len(self.start)
            elif p == pattern.find(self.escape + self.stop, p):
                p += len(self.escape) + len(self.stop)
            elif p == pattern.find(self.start, p):
                starts.append(p)
                p += len(self.start)
            elif p == pattern.find(self.stop, p):
                stops.append(p)
                p += len(self.stop)
            else:
                p += 1

        nt = len(starts)

        if nt > len(stops):
            raise Exception("unterminated tag in pattern: " + pattern)
        if nt < len(stops):
            raise Exception("missing start tag in pattern: " + pattern)

        for i in range(0, nt):
            if starts[i] >= stops[i]:
                raise Exception("tag delimiters out of order in pattern: " + pattern)

        # collect into chunks now
        if nt==0:
            chunks.append(TextChunk(pattern))

        if nt>0 and starts[0]>0: # copy text up to first tag into chunks
            text = pattern[0:starts[0]]
            # BUGFIX: was chunks.add(...) -- Python lists have no add();
            # every other branch in this method uses append().
            chunks.append(TextChunk(text))

        for i in range(0, nt):
            # copy inside of <tag>
            tag = pattern[starts[i] + len(self.start) : stops[i]]
            ruleOrToken = tag
            label = None
            colon = tag.find(':')
            if colon >= 0:
                label = tag[0:colon]
                ruleOrToken = tag[colon+1 : len(tag)]
            # BUGFIX: arguments were swapped (TagChunk(label, ruleOrToken)),
            # which stored the label (None for unlabeled tags) in chunk.tag;
            # tokenize() reads chunk.tag as the rule/token name, so
            # unlabeled tags like <ID> crashed on chunk.tag[0].
            chunks.append(TagChunk(ruleOrToken, label))
            if i+1 < len(starts):
                # copy from end of <tag> to start of next
                text = pattern[stops[i] + len(self.stop) : starts[i + 1]]
                chunks.append(TextChunk(text))

        if nt > 0:
            afterLastTag = stops[nt - 1] + len(self.stop)
            if afterLastTag < n: # copy text from end of last tag to end
                text = pattern[afterLastTag : n]
                chunks.append(TextChunk(text))

        # strip out the escape sequences from text chunks but not tags
        for i in range(0, len(chunks)):
            c = chunks[i]
            if isinstance(c, TextChunk):
                unescaped = c.text.replace(self.escape, "")
                if len(unescaped) < len(c.text):
                    chunks[i] = TextChunk(unescaped)
        return chunks
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/RuleTagToken.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A {@link Token} object representing an entire subtree matched by a parser
9
+ # rule; e.g., {@code <expr>}. These tokens are created for {@link TagChunk}
10
+ # chunks where the tag corresponds to a parser rule.
11
+ #
12
+ from antlr4.Token import Token
13
+
14
+
15
class RuleTagToken(Token):
    """A Token representing an entire subtree matched by a parser rule,
    e.g. ``<expr>``. Created for TagChunk chunks where the tag corresponds
    to a parser rule."""
    __slots__ = ('label', 'ruleName')

    def __init__(self, ruleName:str, bypassTokenType:int, label:str=None):
        """Construct a RuleTagToken.

        :param ruleName: name of the parser rule this rule tag matches
        :param bypassTokenType: bypass token type assigned to the parser rule
        :param label: label associated with the rule tag, or None if unlabeled
        :raises Exception: if ruleName is None or empty
        """
        if ruleName is None or len(ruleName)==0:
            raise Exception("ruleName cannot be null or empty.")
        self.source = None
        self.type = bypassTokenType # token type of the token
        self.channel = Token.DEFAULT_CHANNEL # The parser ignores everything not on DEFAULT_CHANNEL
        self.start = -1 # optional; return -1 if not implemented.
        self.stop = -1 # optional; return -1 if not implemented.
        self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream
        self.line = 0 # line=1..n of the 1st character
        self.column = -1 # beginning of the line at which it occurs, 0..n-1
        self.label = label
        # BUG FIX: ruleName must be set before getText() is called, since
        # getText() reads self.ruleName on both of its branches; the original
        # assigned self.ruleName after self._text = self.getText(), which
        # raised AttributeError on every construction.
        self.ruleName = ruleName
        self._text = self.getText() # text of the token.

    def getText(self):
        """Return the tag text: ``<ruleName>`` or ``<label:ruleName>``."""
        if self.label is None:
            return "<" + self.ruleName + ">"
        else:
            return "<" + self.label + ":" + self.ruleName + ">"
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/TokenTagToken.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A {@link Token} object representing a token of a particular type; e.g.,
9
+ # {@code <ID>}. These tokens are created for {@link TagChunk} chunks where the
10
+ # tag corresponds to a lexer rule or token type.
11
+ #
12
+ from antlr4.Token import CommonToken
13
+
14
+
15
class TokenTagToken(CommonToken):
    """A Token representing a token of a particular type, e.g. ``<ID>``.
    Created for TagChunk chunks where the tag corresponds to a lexer rule
    or token type."""
    __slots__ = ('tokenName', 'label')

    def __init__(self, tokenName:str, type:int, label:str=None):
        """Construct a TokenTagToken.

        :param tokenName: the token name
        :param type: the token type
        :param label: label associated with the token tag, or None if unlabeled
        """
        super().__init__(type=type)
        self.tokenName = tokenName
        self.label = label
        self._text = self.getText()

    def getText(self):
        """Return the tag formatted with ``<`` and ``>`` delimiters:
        ``<tokenName>`` or ``<label:tokenName>``."""
        prefix = "" if self.label is None else self.label + ":"
        return "<" + prefix + self.tokenName + ">"

    def __str__(self):
        """Return a string of the form ``tokenName:type``."""
        return self.tokenName + ":" + str(self.type)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Tree.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+
6
+
7
+ # The basic notion of a tree has a parent, a payload, and a list of children.
8
+ # It is the most abstract interface for all the trees used by ANTLR.
9
+ #/
10
+ from antlr4.Token import Token
11
+
12
# Sentinel source interval for nodes with no underlying token range.
INVALID_INTERVAL = (-1, -2)

class Tree(object):
    """Most abstract notion of a tree: a parent, a payload, children."""
    pass

class SyntaxTree(Tree):
    """A tree whose nodes correspond to spans of the input."""
    pass

class ParseTree(SyntaxTree):
    """A syntax tree produced by a parse."""
    pass

class RuleNode(ParseTree):
    """An interior parse-tree node corresponding to a grammar rule."""
    pass

class TerminalNode(ParseTree):
    """A leaf parse-tree node corresponding to a single token."""
    pass

class ErrorNode(TerminalNode):
    """A leaf node representing a token consumed during error recovery."""
    pass
31
+
32
class ParseTreeVisitor(object):
    """Generic bottom-up visitor over parse trees. Subclasses override the
    visit*/aggregate hooks; the defaults visit every child and keep the
    last child's result."""

    def visit(self, tree):
        """Dispatch to the node's accept method (double dispatch)."""
        return tree.accept(self)

    def visitChildren(self, node):
        """Visit each child in order, folding results with aggregateResult;
        stops early as soon as shouldVisitNextChild returns False."""
        aggregate = self.defaultResult()
        for index in range(node.getChildCount()):
            if not self.shouldVisitNextChild(node, aggregate):
                break
            child = node.getChild(index)
            aggregate = self.aggregateResult(aggregate, child.accept(self))
        return aggregate

    def visitTerminal(self, node):
        """Default terminal handling: the default result."""
        return self.defaultResult()

    def visitErrorNode(self, node):
        """Default error-node handling: the default result."""
        return self.defaultResult()

    def defaultResult(self):
        """Result for nodes with nothing to contribute; None by default."""
        return None

    def aggregateResult(self, aggregate, nextResult):
        """Combine the running result with a child's result; the default
        simply keeps the most recent child's result."""
        return nextResult

    def shouldVisitNextChild(self, node, currentResult):
        """Hook allowing early termination of visitChildren; True by default."""
        return True
63
+
64
# Forward declaration: ParserRuleContext is defined in another module that
# imports this one, so a placeholder is bound only while the annotations in
# the class body below are evaluated, then deleted again.
ParserRuleContext = None

class ParseTreeListener(object):
    # Listener interface fired by ParseTreeWalker during a depth-first walk.
    # Subclasses override only the callbacks they care about; every default
    # is a no-op.

    def visitTerminal(self, node:TerminalNode):
        # Called for each terminal (token) node encountered.
        pass

    def visitErrorNode(self, node:ErrorNode):
        # Called for each error node encountered.
        pass

    def enterEveryRule(self, ctx:ParserRuleContext):
        # Called before descending into any rule context.
        pass

    def exitEveryRule(self, ctx:ParserRuleContext):
        # Called after all children of a rule context have been walked.
        pass

# Remove the placeholder so the real ParserRuleContext is not shadowed.
del ParserRuleContext
81
+
82
class TerminalNodeImpl(TerminalNode):
    """Concrete TerminalNode wrapping a single Token (a parse-tree leaf)."""
    __slots__ = ('parentCtx', 'symbol')

    def __init__(self, symbol:Token):
        self.parentCtx = None  # set by the enclosing rule context when attached
        self.symbol = symbol   # the underlying Token
    # NOTE: a redundant __setattr__ override that merely forwarded to
    # super().__setattr__ was removed -- it was byte-for-byte equivalent to
    # the default attribute assignment and only added call overhead.

    def getChild(self, i:int):
        """Terminals have no children."""
        return None

    def getSymbol(self):
        """Return the wrapped Token."""
        return self.symbol

    def getParent(self):
        """Return the parent rule context, or None if detached."""
        return self.parentCtx

    def getPayload(self):
        """The payload of a terminal node is its Token."""
        return self.symbol

    def getSourceInterval(self):
        """Return (tokenIndex, tokenIndex), or INVALID_INTERVAL when there
        is no symbol."""
        if self.symbol is None:
            return INVALID_INTERVAL
        tokenIndex = self.symbol.tokenIndex
        return (tokenIndex, tokenIndex)

    def getChildCount(self):
        """Terminals always have zero children."""
        return 0

    def accept(self, visitor:ParseTreeVisitor):
        """Dispatch to the visitor's terminal callback."""
        return visitor.visitTerminal(self)

    def getText(self):
        """Return the text of the wrapped token."""
        return self.symbol.text

    def __str__(self):
        if self.symbol.type == Token.EOF:
            return "<EOF>"
        else:
            return self.symbol.text
123
+
124
class ErrorNodeImpl(TerminalNodeImpl, ErrorNode):
    """Represents a token consumed during resynchronization rather than
    during a valid match: created during single-token insertion/deletion
    and during "consume until error recovery set" handling of no-viable-
    alternative exceptions."""

    def __init__(self, token:Token):
        super().__init__(token)

    def accept(self, visitor:ParseTreeVisitor):
        # Route to the error-node callback instead of the terminal one.
        return visitor.visitErrorNode(self)
137
+
138
+
139
class ParseTreeWalker(object):
    """Depth-first walker that fires ParseTreeListener events over a tree."""

    DEFAULT = None  # shared singleton, assigned after the class body

    def walk(self, listener:ParseTreeListener, t:ParseTree):
        """
        Walk the given parse tree depth-first, calling enterRule before
        descending into a rule node's children and exitRule afterwards.
        Terminal and error nodes fire their visit callbacks instead.
        @param listener The listener used by the walker to process grammar rules
        @param t The parse tree to be walked on
        """
        # ErrorNode must be tested first: it is a subclass of TerminalNode.
        if isinstance(t, ErrorNode):
            listener.visitErrorNode(t)
        elif isinstance(t, TerminalNode):
            listener.visitTerminal(t)
        else:
            self.enterRule(listener, t)
            for child in t.getChildren():
                self.walk(listener, child)
            self.exitRule(listener, t)

    def enterRule(self, listener:ParseTreeListener, r:RuleNode):
        """
        Enter a grammar rule: first the generic enterEveryRule event, then
        the event specific to this parse-tree node.
        @param listener The listener responding to the trigger events
        @param r The grammar rule containing the rule context
        """
        ctx = r.getRuleContext()
        listener.enterEveryRule(ctx)
        ctx.enterRule(listener)

    def exitRule(self, listener:ParseTreeListener, r:RuleNode):
        """
        Exit a grammar rule: the node-specific event fires first, then the
        generic exitEveryRule event (reverse of enterRule's order).
        @param listener The listener responding to the trigger events
        @param r The grammar rule containing the rule context
        """
        ctx = r.getRuleContext()
        ctx.exitRule(listener)
        listener.exitEveryRule(ctx)

ParseTreeWalker.DEFAULT = ParseTreeWalker()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/Trees.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+
8
+ # A set of utility routines useful for all kinds of ANTLR trees.#
9
+ from io import StringIO
10
+ from antlr4.Token import Token
11
+ from antlr4.Utils import escapeWhitespace
12
+ from antlr4.tree.Tree import RuleNode, ErrorNode, TerminalNode, Tree, ParseTree
13
+
14
+ # need forward declaration
15
+ Parser = None
16
+
17
class Trees(object):
    """A set of utility routines useful for all kinds of ANTLR trees."""

    @classmethod
    def toStringTree(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
        """Print a whole tree in LISP form; getNodeText supplies the text
        for each node. When a recognizer is given, its ruleNames are used."""
        if recog is not None:
            ruleNames = recog.ruleNames
        label = escapeWhitespace(cls.getNodeText(t, ruleNames), False)
        childCount = t.getChildCount()
        if childCount == 0:
            return label
        with StringIO() as out:
            out.write("(")
            out.write(label)
            out.write(' ')
            for idx in range(childCount):
                if idx > 0:
                    out.write(' ')
                out.write(cls.toStringTree(t.getChild(idx), ruleNames))
            out.write(")")
            return out.getvalue()

    @classmethod
    def getNodeText(cls, t:Tree, ruleNames:list=None, recog:Parser=None):
        """Return display text for a node: rule name (with :altNumber when
        set), error text, token text, or the stringified payload."""
        if recog is not None:
            ruleNames = recog.ruleNames
        if ruleNames is not None:
            if isinstance(t, RuleNode):
                name = ruleNames[t.getRuleIndex()]
                altNumber = t.getAltNumber()
                # should use ATN.INVALID_ALT_NUMBER but won't compile
                return name if altNumber == 0 else name + ":" + str(altNumber)
            if isinstance(t, ErrorNode):
                return str(t)
            if isinstance(t, TerminalNode) and t.symbol is not None:
                return t.symbol.text
        # no recognizer / rule names: fall back to the payload
        payload = t.getPayload()
        if isinstance(payload, Token):
            return payload.text
        return str(t.getPayload())

    @classmethod
    def getChildren(cls, t:Tree):
        """Return an ordered list of all children of this node."""
        return [t.getChild(n) for n in range(t.getChildCount())]

    @classmethod
    def getAncestors(cls, t:Tree):
        """Return all ancestors of this node: root first, the node's own
        parent last."""
        ancestors = []
        node = t.getParent()
        while node is not None:
            ancestors.insert(0, node)  # insert at start so root ends up first
            node = node.getParent()
        return ancestors

    @classmethod
    def findAllTokenNodes(cls, t:ParseTree, ttype:int):
        """All terminal nodes under t whose token type equals ttype."""
        return cls.findAllNodes(t, ttype, True)

    @classmethod
    def findAllRuleNodes(cls, t:ParseTree, ruleIndex:int):
        """All rule contexts under t whose rule index equals ruleIndex."""
        return cls.findAllNodes(t, ruleIndex, False)

    @classmethod
    def findAllNodes(cls, t:ParseTree, index:int, findTokens:bool):
        """Collect matching nodes into a fresh list (preorder)."""
        matches = []
        cls._findAllNodes(t, index, findTokens, matches)
        return matches

    @classmethod
    def _findAllNodes(cls, t:ParseTree, index:int, findTokens:bool, nodes:list):
        # imported here to avoid a circular import at module load time
        from antlr4.ParserRuleContext import ParserRuleContext
        # check this node (the root) first
        if findTokens and isinstance(t, TerminalNode):
            if t.symbol.type == index:
                nodes.append(t)
        elif not findTokens and isinstance(t, ParserRuleContext):
            if t.ruleIndex == index:
                nodes.append(t)
        # then recurse into the children
        for n in range(t.getChildCount()):
            cls._findAllNodes(t.getChild(n), index, findTokens, nodes)

    @classmethod
    def descendants(cls, t:ParseTree):
        """Return t plus every node below it, in preorder."""
        nodes = [t]
        for n in range(t.getChildCount()):
            nodes.extend(cls.descendants(t.getChild(n)))
        return nodes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__init__.py ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Chunk.cpython-38.pyc ADDED
Binary file (1.2 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-38.pyc ADDED
Binary file (1.07 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/tree/__pycache__/Tree.cpython-38.pyc ADDED
Binary file (7.53 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/XPath.py ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # Represent a subset of XPath XML path syntax for use in identifying nodes in
9
+ # parse trees.
10
+ #
11
+ # <p>
12
+ # Split path into words and separators {@code /} and {@code //} via ANTLR
13
+ # itself then walk path elements from left to right. At each separator-word
14
+ # pair, find set of nodes. Next stage uses those as work list.</p>
15
+ #
16
+ # <p>
17
+ # The basic interface is
18
+ # {@link XPath#findAll ParseTree.findAll}{@code (tree, pathString, parser)}.
19
+ # But that is just shorthand for:</p>
20
+ #
21
+ # <pre>
22
+ # {@link XPath} p = new {@link XPath#XPath XPath}(parser, pathString);
23
+ # return p.{@link #evaluate evaluate}(tree);
24
+ # </pre>
25
+ #
26
+ # <p>
27
+ # See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
28
+ # allows operators:</p>
29
+ #
30
+ # <dl>
31
+ # <dt>/</dt> <dd>root</dd>
32
+ # <dt>//</dt> <dd>anywhere</dd>
33
+ # <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
34
+ # operator</dd>
35
+ # </dl>
36
+ #
37
+ # <p>
38
+ # and path elements:</p>
39
+ #
40
+ # <dl>
41
+ # <dt>ID</dt> <dd>token name</dd>
42
+ # <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
43
+ # <dt>expr</dt> <dd>rule name</dd>
44
+ # <dt>*</dt> <dd>wildcard matching any node</dd>
45
+ # </dl>
46
+ #
47
+ # <p>
48
+ # Whitespace is not allowed.</p>
49
+ #
50
+ from antlr4 import CommonTokenStream, DFA, PredictionContextCache, Lexer, LexerATNSimulator, ParserRuleContext, TerminalNode
51
+ from antlr4.InputStream import InputStream
52
+ from antlr4.Parser import Parser
53
+ from antlr4.RuleContext import RuleContext
54
+ from antlr4.Token import Token
55
+ from antlr4.atn.ATNDeserializer import ATNDeserializer
56
+ from antlr4.error.ErrorListener import ErrorListener
57
+ from antlr4.error.Errors import LexerNoViableAltException
58
+ from antlr4.tree.Tree import ParseTree
59
+ from antlr4.tree.Trees import Trees
60
+ from io import StringIO
61
+
62
+
63
def serializedATN():
    # Serialized ATN for the generated XPathLexer (opaque, tool-generated
    # data -- do not edit by hand). Accumulated into one string and
    # deserialized once at XPathLexer class-creation time.
    with StringIO() as buf:
        buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\n")
        buf.write("\64\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write("\7\4\b\t\b\4\t\t\t\3\2\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5")
        buf.write("\3\6\3\6\7\6\37\n\6\f\6\16\6\"\13\6\3\6\3\6\3\7\3\7\5")
        buf.write("\7(\n\7\3\b\3\b\3\t\3\t\7\t.\n\t\f\t\16\t\61\13\t\3\t")
        buf.write("\3\t\3/\2\n\3\5\5\6\7\7\t\b\13\t\r\2\17\2\21\n\3\2\4\7")
        buf.write("\2\62;aa\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c")
        buf.write("|\u00c2\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381")
        buf.write("\u2001\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801")
        buf.write("\uf902\ufdd1\ufdf2\uffff\64\2\3\3\2\2\2\2\5\3\2\2\2\2")
        buf.write("\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\21\3\2\2\2\3\23")
        buf.write("\3\2\2\2\5\26\3\2\2\2\7\30\3\2\2\2\t\32\3\2\2\2\13\34")
        buf.write("\3\2\2\2\r\'\3\2\2\2\17)\3\2\2\2\21+\3\2\2\2\23\24\7\61")
        buf.write("\2\2\24\25\7\61\2\2\25\4\3\2\2\2\26\27\7\61\2\2\27\6\3")
        buf.write("\2\2\2\30\31\7,\2\2\31\b\3\2\2\2\32\33\7#\2\2\33\n\3\2")
        buf.write("\2\2\34 \5\17\b\2\35\37\5\r\7\2\36\35\3\2\2\2\37\"\3\2")
        buf.write("\2\2 \36\3\2\2\2 !\3\2\2\2!#\3\2\2\2\" \3\2\2\2#$\b\6")
        buf.write("\2\2$\f\3\2\2\2%(\5\17\b\2&(\t\2\2\2\'%\3\2\2\2\'&\3\2")
        buf.write("\2\2(\16\3\2\2\2)*\t\3\2\2*\20\3\2\2\2+/\7)\2\2,.\13\2")
        buf.write("\2\2-,\3\2\2\2.\61\3\2\2\2/\60\3\2\2\2/-\3\2\2\2\60\62")
        buf.write("\3\2\2\2\61/\3\2\2\2\62\63\7)\2\2\63\22\3\2\2\2\6\2 \'")
        buf.write("/\3\3\6\2")
        return buf.getvalue()
88
+
89
+
90
class XPathLexer(Lexer):
    # Generated lexer for the XPath mini-language ('/', '//', '!', '*',
    # token/rule names, 'string' literals). Structure follows ANTLR's
    # generated-lexer template; edit the grammar, not this class.

    # Deserialized once at class-creation time from the serialized ATN above.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants (must match the serialized ATN).
    TOKEN_REF = 1
    RULE_REF = 2
    ANYWHERE = 3
    ROOT = 4
    WILDCARD = 5
    BANG = 6
    ID = 7
    STRING = 8

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'//'", "'/'", "'*'", "'!'" ]

    symbolicNames = [ "<INVALID>",
            "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG",
            "ID", "STRING" ]

    ruleNames = [ "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar",
                  "NameStartChar", "STRING" ]

    grammarFileName = "XPathLexer.g4"

    def __init__(self, input=None):
        super().__init__(input)
        self.checkVersion("4.9.1")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None

    def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int):
        # Lazily build the ruleIndex -> action-method dispatch table; only
        # rule 4 (ID) carries an embedded action.
        if self._actions is None:
            actions = dict()
            actions[4] = self.ID_action
            self._actions = actions
        _action = self._actions.get(ruleIndex, None)
        if _action is not None:
            _action(localctx, actionIndex)
        else:
            raise Exception("No registered action for: %d" % ruleIndex)

    def ID_action(self, localctx:RuleContext , actionIndex:int):
        # Reclassify an ID token: capitalized identifiers are token
        # references, lowercase ones are rule references.
        if actionIndex == 0:
            char = self.text[0]
            if char.isupper():
                self.type = XPathLexer.TOKEN_REF
            else:
                self.type = XPathLexer.RULE_REF
147
class XPath(object):
    """A subset of XPath-like path syntax for locating nodes in parse
    trees. Operators: ``/`` (root), ``//`` (anywhere), ``!`` (invert,
    directly after a separator). Elements: token name, 'string' literal,
    rule name, or ``*`` wildcard. Whitespace is not allowed."""

    WILDCARD = "*" # word not operator/separator
    NOT = "!" # word for invert operator

    def __init__(self, parser:Parser, path:str):
        self.parser = parser
        self.path = path
        self.elements = self.split(path)

    def split(self, path:str):
        """Tokenize the path with XPathLexer and translate the token stream
        into a list of XPathElement objects.

        :raises Exception: on lexing errors or malformed paths
        """
        input = InputStream(path)
        lexer = XPathLexer(input)
        # BUG FIX: the replacement recover() must take the exception as its
        # ONLY parameter. It is installed as an instance attribute, so no
        # implicit self is bound when the lexer calls self.recover(e); the
        # original (self, e) signature made that call raise TypeError
        # instead of re-raising the lexer error.
        def recover(e):
            raise e
        lexer.recover = recover
        lexer.removeErrorListeners()
        lexer.addErrorListener(ErrorListener()) # XPathErrorListener does no more
        tokenStream = CommonTokenStream(lexer)
        try:
            tokenStream.fill()
        except LexerNoViableAltException as e:
            pos = lexer.column
            msg = "Invalid tokens or characters at index %d in path '%s'" % (pos, path)
            raise Exception(msg, e)

        tokens = iter(tokenStream.tokens)
        elements = list()
        for el in tokens:
            invert = False
            anywhere = False
            # Check for path separators, if none assume root
            if el.type in [XPathLexer.ROOT, XPathLexer.ANYWHERE]:
                anywhere = el.type == XPathLexer.ANYWHERE
                next_el = next(tokens, None)
                # explicit None check: Token objects are always truthy
                if next_el is None:
                    raise Exception('Missing element after %s' % el.getText())
                else:
                    el = next_el
            # Check for bangs
            if el.type == XPathLexer.BANG:
                invert = True
                next_el = next(tokens, None)
                if next_el is None:
                    raise Exception('Missing element after %s' % el.getText())
                else:
                    el = next_el
            # Add searched element
            if el.type in [XPathLexer.TOKEN_REF, XPathLexer.RULE_REF, XPathLexer.WILDCARD, XPathLexer.STRING]:
                element = self.getXPathElement(el, anywhere)
                element.invert = invert
                elements.append(element)
            elif el.type==Token.EOF:
                break
            else:
                raise Exception("Unknown path element %s" % lexer.symbolicNames[el.type])
        return elements

    #
    # Convert word like {@code#} or {@code ID} or {@code expr} to a path
    # element. {@code anywhere} is {@code true} if {@code //} precedes the
    # word.
    #
    def getXPathElement(self, wordToken:Token, anywhere:bool):
        if wordToken.type==Token.EOF:
            raise Exception("Missing path element at end of path")

        word = wordToken.text
        if wordToken.type==XPathLexer.WILDCARD :
            return XPathWildcardAnywhereElement() if anywhere else XPathWildcardElement()

        elif wordToken.type in [XPathLexer.TOKEN_REF, XPathLexer.STRING]:
            tsource = self.parser.getTokenStream().tokenSource

            ttype = Token.INVALID_TYPE
            if wordToken.type == XPathLexer.TOKEN_REF:
                if word in tsource.ruleNames:
                    ttype = tsource.ruleNames.index(word) + 1
            else:
                if word in tsource.literalNames:
                    ttype = tsource.literalNames.index(word)

            if ttype == Token.INVALID_TYPE:
                raise Exception("%s at index %d isn't a valid token name" % (word, wordToken.tokenIndex))
            return XPathTokenAnywhereElement(word, ttype) if anywhere else XPathTokenElement(word, ttype)

        else:
            ruleIndex = self.parser.ruleNames.index(word) if word in self.parser.ruleNames else -1

            if ruleIndex == -1:
                raise Exception("%s at index %d isn't a valid rule name" % (word, wordToken.tokenIndex))
            return XPathRuleAnywhereElement(word, ruleIndex) if anywhere else XPathRuleElement(word, ruleIndex)


    @staticmethod
    def findAll(tree:ParseTree, xpath:str, parser:Parser):
        """Shorthand: XPath(parser, xpath).evaluate(tree)."""
        p = XPath(parser, xpath)
        return p.evaluate(tree)

    #
    # Return a list of all nodes starting at {@code t} as root that satisfy the
    # path. The root {@code /} is relative to the node passed to
    # {@link #evaluate}.
    #
    def evaluate(self, t:ParseTree):
        dummyRoot = ParserRuleContext()
        dummyRoot.children = [t] # don't set t's parent.

        work = [dummyRoot]
        for element in self.elements:
            work_next = list()
            for node in work:
                if not isinstance(node, TerminalNode) and node.children:
                    # only try to match next element if it has children
                    # e.g., //func/*/stat might have a token node for which
                    # we can't go looking for stat nodes.
                    matching = element.evaluate(node)

                    # See issue antlr#370 - Prevents XPath from returning the
                    # same node multiple times
                    matching = filter(lambda m: m not in work_next, matching)

                    work_next.extend(matching)
            work = work_next

        return work
273
+
274
+
275
class XPathElement(object):
    """Base class for one step of an XPath: a node name plus an optional
    inversion flag (set when the step was preceded by ``!``)."""

    def __init__(self, nodeName:str):
        self.nodeName = nodeName
        self.invert = False

    def __str__(self):
        bang = "!" if self.invert else ""
        return type(self).__name__ + "[" + bang + self.nodeName + "]"
283
+
284
+
285
+
286
class XPathRuleAnywhereElement(XPathElement):
    """Rule step with ``//``: either ``ID`` at the start of a path or
    ``...//ID`` in the middle; searches all descendants."""

    def __init__(self, ruleName:str, ruleIndex:int):
        super().__init__(ruleName)
        self.ruleIndex = ruleIndex

    def evaluate(self, t:ParseTree):
        # All ParserRuleContext descendants of t matching ruleIndex
        # (complemented when self.invert is set).
        def matches(node):
            return isinstance(node, ParserRuleContext) and (self.invert ^ (node.getRuleIndex() == self.ruleIndex))
        return filter(matches, Trees.descendants(t))
298
+
299
class XPathRuleElement(XPathElement):
    """Rule step with ``/``: matches only direct children."""

    def __init__(self, ruleName:str, ruleIndex:int):
        super().__init__(ruleName)
        self.ruleIndex = ruleIndex

    def evaluate(self, t:ParseTree):
        # All ParserRuleContext children of t matching ruleIndex
        # (complemented when self.invert is set).
        def matches(node):
            return isinstance(node, ParserRuleContext) and (self.invert ^ (node.getRuleIndex() == self.ruleIndex))
        return filter(matches, Trees.getChildren(t))
308
+
309
class XPathTokenAnywhereElement(XPathElement):
    """Token step with ``//``: searches all descendants for terminals of
    the given token type."""

    def __init__(self, ruleName:str, tokenType:int):
        super().__init__(ruleName)
        self.tokenType = tokenType

    def evaluate(self, t:ParseTree):
        # All TerminalNode descendants of t matching tokenType
        # (complemented when self.invert is set).
        def matches(node):
            return isinstance(node, TerminalNode) and (self.invert ^ (node.symbol.type == self.tokenType))
        return filter(matches, Trees.descendants(t))
318
+
319
class XPathTokenElement(XPathElement):
    """Token step with ``/``: matches only direct terminal children of
    the given token type."""

    def __init__(self, ruleName:str, tokenType:int):
        super().__init__(ruleName)
        self.tokenType = tokenType

    def evaluate(self, t:ParseTree):
        # All TerminalNode children of t matching tokenType
        # (complemented when self.invert is set).
        def matches(node):
            return isinstance(node, TerminalNode) and (self.invert ^ (node.symbol.type == self.tokenType))
        return filter(matches, Trees.getChildren(t))
328
+
329
+
330
class XPathWildcardAnywhereElement(XPathElement):
    """Wildcard step with ``//``: matches every descendant."""

    def __init__(self):
        super().__init__(XPath.WILDCARD)

    def evaluate(self, t:ParseTree):
        # ``!*`` is weird but valid: it matches nothing.
        return list() if self.invert else Trees.descendants(t)
340
+
341
+
342
class XPathWildcardElement(XPathElement):
    """Wildcard step with ``/``: matches every direct child."""

    def __init__(self):
        super().__init__(XPath.WILDCARD)

    def evaluate(self, t:ParseTree):
        # ``!*`` is weird but valid: it matches nothing.
        return list() if self.invert else Trees.getChildren(t)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4/xpath/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __author__ = 'ericvergnaud'
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/lxml/objectify.cpython-38-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fc2c2f00ed7144dfeb12897e04f51dd6bf905930578bbe052a0e474b9ee7312
3
+ size 4436376
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/__pycache__/backend_bases.cpython-38.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c2eb33aec3f02afd03cdb233ada4c92a78615d25f8c2a174a593b4b2df9df19
3
+ size 115881
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (10.3 kB). View file
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-38.pyc ADDED
Binary file (17.6 kB). View file