ZTWHHH committed on
Commit
dca6dc5
·
verified ·
1 Parent(s): ee4c7ed

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +4 -0
  2. evalkit_tf437/lib/python3.10/site-packages/aiohttp-3.10.10.dist-info/top_level.txt +1 -0
  3. evalkit_tf437/lib/python3.10/site-packages/antlr4/IntervalSet.py +180 -0
  4. evalkit_tf437/lib/python3.10/site-packages/antlr4/RuleContext.py +227 -0
  5. evalkit_tf437/lib/python3.10/site-packages/antlr4/Token.py +155 -0
  6. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/BufferedTokenStream.cpython-310.pyc +0 -0
  7. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/CommonTokenStream.cpython-310.pyc +0 -0
  8. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/FileStream.cpython-310.pyc +0 -0
  9. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/InputStream.cpython-310.pyc +0 -0
  10. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/IntervalSet.cpython-310.pyc +0 -0
  11. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/LL1Analyzer.cpython-310.pyc +0 -0
  12. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/Lexer.cpython-310.pyc +0 -0
  13. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/ParserInterpreter.cpython-310.pyc +0 -0
  14. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/ParserRuleContext.cpython-310.pyc +0 -0
  15. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/PredictionContext.cpython-310.pyc +0 -0
  16. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/RuleContext.cpython-310.pyc +0 -0
  17. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-310.pyc +0 -0
  18. evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/Utils.cpython-310.pyc +0 -0
  19. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/ATN.py +132 -0
  20. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/PredictionMode.py +499 -0
  21. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNConfig.cpython-310.pyc +0 -0
  22. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNConfigSet.cpython-310.pyc +0 -0
  23. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNSimulator.cpython-310.pyc +0 -0
  24. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNType.cpython-310.pyc +0 -0
  25. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/PredictionMode.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/SemanticContext.cpython-310.pyc +0 -0
  27. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/DFASerializer.py +73 -0
  28. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/DFAState.py +126 -0
  29. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__init__.py +1 -0
  30. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/DFA.cpython-310.pyc +0 -0
  31. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/DFASerializer.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/DFAState.cpython-310.pyc +0 -0
  33. evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/__init__.cpython-310.pyc +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/antlr4/error/DiagnosticErrorListener.py +107 -0
  35. evalkit_tf437/lib/python3.10/site-packages/antlr4/error/__init__.py +1 -0
  36. evalkit_tf437/lib/python3.10/site-packages/antlr4/error/__pycache__/DiagnosticErrorListener.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/antlr4/error/__pycache__/ErrorListener.cpython-310.pyc +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/Chunk.py +30 -0
  39. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/ParseTreePattern.py +72 -0
  40. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/ParseTreePatternMatcher.py +374 -0
  41. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/RuleTagToken.py +50 -0
  42. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/TokenTagToken.py +47 -0
  43. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/Tree.py +191 -0
  44. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__init__.py +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/ParseTreeMatch.cpython-310.pyc +0 -0
  46. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/TokenTagToken.cpython-310.pyc +0 -0
  48. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/Tree.cpython-310.pyc +0 -0
  49. evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/Trees.cpython-310.pyc +0 -0
  50. evalkit_tf437/lib/python3.10/site-packages/flash_attn-2.6.1.dist-info/INSTALLER +1 -0
.gitattributes CHANGED
@@ -324,3 +324,7 @@ evalkit_tf437/lib/python3.10/site-packages/pandas/_libs/tslibs/timestamps.cpytho
324
  evalkit_tf437/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
325
  evalkit_tf437/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
326
  evalkit_tf437/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
324
  evalkit_tf437/lib/python3.10/site-packages/sklearn/tree/_tree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
325
  evalkit_tf437/lib/python3.10/site-packages/pandas/_libs/ops.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
326
  evalkit_tf437/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
327
+ evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
328
+ evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libgfortran-91cc3cb1.so.3.0.0 filter=lfs diff=lfs merge=lfs -text
329
+ evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-xkb-9ba31ab3.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
330
+ evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/aiohttp-3.10.10.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ aiohttp
evalkit_tf437/lib/python3.10/site-packages/antlr4/IntervalSet.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ from io import StringIO
8
+ from antlr4.Token import Token
9
+
10
+ # need forward declarations
11
+ IntervalSet = None
12
+
13
+ class IntervalSet(object):
14
+ __slots__ = ('intervals', 'readonly')
15
+
16
+ def __init__(self):
17
+ self.intervals = None
18
+ self.readonly = False
19
+
20
+ def __iter__(self):
21
+ if self.intervals is not None:
22
+ for i in self.intervals:
23
+ for c in i:
24
+ yield c
25
+
26
+ def __getitem__(self, item):
27
+ i = 0
28
+ for k in self:
29
+ if i==item:
30
+ return k
31
+ else:
32
+ i += 1
33
+ return Token.INVALID_TYPE
34
+
35
+ def addOne(self, v:int):
36
+ self.addRange(range(v, v+1))
37
+
38
+ def addRange(self, v:range):
39
+ if self.intervals is None:
40
+ self.intervals = list()
41
+ self.intervals.append(v)
42
+ else:
43
+ # find insert pos
44
+ k = 0
45
+ for i in self.intervals:
46
+ # distinct range -> insert
47
+ if v.stop<i.start:
48
+ self.intervals.insert(k, v)
49
+ return
50
+ # contiguous range -> adjust
51
+ elif v.stop==i.start:
52
+ self.intervals[k] = range(v.start, i.stop)
53
+ return
54
+ # overlapping range -> adjust and reduce
55
+ elif v.start<=i.stop:
56
+ self.intervals[k] = range(min(i.start,v.start), max(i.stop,v.stop))
57
+ self.reduce(k)
58
+ return
59
+ k += 1
60
+ # greater than any existing
61
+ self.intervals.append(v)
62
+
63
+ def addSet(self, other:IntervalSet):
64
+ if other.intervals is not None:
65
+ for i in other.intervals:
66
+ self.addRange(i)
67
+ return self
68
+
69
+ def reduce(self, k:int):
70
+ # only need to reduce if k is not the last
71
+ if k<len(self.intervals)-1:
72
+ l = self.intervals[k]
73
+ r = self.intervals[k+1]
74
+ # if r contained in l
75
+ if l.stop >= r.stop:
76
+ self.intervals.pop(k+1)
77
+ self.reduce(k)
78
+ elif l.stop >= r.start:
79
+ self.intervals[k] = range(l.start, r.stop)
80
+ self.intervals.pop(k+1)
81
+
82
+ def complement(self, start, stop):
83
+ result = IntervalSet()
84
+ result.addRange(range(start,stop+1))
85
+ for i in self.intervals:
86
+ result.removeRange(i)
87
+ return result
88
+
89
+ def __contains__(self, item):
90
+ if self.intervals is None:
91
+ return False
92
+ else:
93
+ return any(item in i for i in self.intervals)
94
+
95
+ def __len__(self):
96
+ return sum(len(i) for i in self.intervals)
97
+
98
+ def removeRange(self, v):
99
+ if v.start==v.stop-1:
100
+ self.removeOne(v.start)
101
+ elif self.intervals is not None:
102
+ k = 0
103
+ for i in self.intervals:
104
+ # intervals are ordered
105
+ if v.stop<=i.start:
106
+ return
107
+ # check for including range, split it
108
+ elif v.start>i.start and v.stop<i.stop:
109
+ self.intervals[k] = range(i.start, v.start)
110
+ x = range(v.stop, i.stop)
111
+ self.intervals.insert(k, x)
112
+ return
113
+ # check for included range, remove it
114
+ elif v.start<=i.start and v.stop>=i.stop:
115
+ self.intervals.pop(k)
116
+ k -= 1 # need another pass
117
+ # check for lower boundary
118
+ elif v.start<i.stop:
119
+ self.intervals[k] = range(i.start, v.start)
120
+ # check for upper boundary
121
+ elif v.stop<i.stop:
122
+ self.intervals[k] = range(v.stop, i.stop)
123
+ k += 1
124
+
125
+ def removeOne(self, v):
126
+ if self.intervals is not None:
127
+ k = 0
128
+ for i in self.intervals:
129
+ # intervals is ordered
130
+ if v<i.start:
131
+ return
132
+ # check for single value range
133
+ elif v==i.start and v==i.stop-1:
134
+ self.intervals.pop(k)
135
+ return
136
+ # check for lower boundary
137
+ elif v==i.start:
138
+ self.intervals[k] = range(i.start+1, i.stop)
139
+ return
140
+ # check for upper boundary
141
+ elif v==i.stop-1:
142
+ self.intervals[k] = range(i.start, i.stop-1)
143
+ return
144
+ # split existing range
145
+ elif v<i.stop-1:
146
+ x = range(i.start, v)
147
+ self.intervals[k] = range(v + 1, i.stop)
148
+ self.intervals.insert(k, x)
149
+ return
150
+ k += 1
151
+
152
+
153
+ def toString(self, literalNames:list, symbolicNames:list):
154
+ if self.intervals is None:
155
+ return "{}"
156
+ with StringIO() as buf:
157
+ if len(self)>1:
158
+ buf.write("{")
159
+ first = True
160
+ for i in self.intervals:
161
+ for j in i:
162
+ if not first:
163
+ buf.write(", ")
164
+ buf.write(self.elementName(literalNames, symbolicNames, j))
165
+ first = False
166
+ if len(self)>1:
167
+ buf.write("}")
168
+ return buf.getvalue()
169
+
170
+ def elementName(self, literalNames:list, symbolicNames:list, a:int):
171
+ if a==Token.EOF:
172
+ return "<EOF>"
173
+ elif a==Token.EPSILON:
174
+ return "<EPSILON>"
175
+ else:
176
+ if a<len(literalNames) and literalNames[a] != "<INVALID>":
177
+ return literalNames[a]
178
+ if a<len(symbolicNames):
179
+ return symbolicNames[a]
180
+ return "<UNKNOWN>"
evalkit_tf437/lib/python3.10/site-packages/antlr4/RuleContext.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+
6
+
7
+ # A rule context is a record of a single rule invocation. It knows
8
+ # which context invoked it, if any. If there is no parent context, then
9
+ # naturally the invoking state is not valid. The parent link
10
+ # provides a chain upwards from the current rule invocation to the root
11
+ # of the invocation tree, forming a stack. We actually carry no
12
+ # information about the rule associated with this context (except
13
+ # when parsing). We keep only the state number of the invoking state from
14
+ # the ATN submachine that invoked this. Contrast this with the s
15
+ # pointer inside ParserRuleContext that tracks the current state
16
+ # being "executed" for the current rule.
17
+ #
18
+ # The parent contexts are useful for computing lookahead sets and
19
+ # getting error information.
20
+ #
21
+ # These objects are used during parsing and prediction.
22
+ # For the special case of parsers, we use the subclass
23
+ # ParserRuleContext.
24
+ #
25
+ # @see ParserRuleContext
26
+ #/
27
+ from io import StringIO
28
+ from antlr4.tree.Tree import RuleNode, INVALID_INTERVAL, ParseTreeVisitor
29
+ from antlr4.tree.Trees import Trees
30
+
31
+ # need forward declarations
32
+ RuleContext = None
33
+ Parser = None
34
+
35
+ class RuleContext(RuleNode):
36
+ __slots__ = ('parentCtx', 'invokingState')
37
+ EMPTY = None
38
+
39
+ def __init__(self, parent:RuleContext=None, invokingState:int=-1):
40
+ super().__init__()
41
+ # What context invoked this rule?
42
+ self.parentCtx = parent
43
+ # What state invoked the rule associated with this context?
44
+ # The "return address" is the followState of invokingState
45
+ # If parent is null, this should be -1.
46
+ self.invokingState = invokingState
47
+
48
+
49
+ def depth(self):
50
+ n = 0
51
+ p = self
52
+ while p is not None:
53
+ p = p.parentCtx
54
+ n += 1
55
+ return n
56
+
57
+ # A context is empty if there is no invoking state; meaning nobody call
58
+ # current context.
59
+ def isEmpty(self):
60
+ return self.invokingState == -1
61
+
62
+ # satisfy the ParseTree / SyntaxTree interface
63
+
64
+ def getSourceInterval(self):
65
+ return INVALID_INTERVAL
66
+
67
+ def getRuleContext(self):
68
+ return self
69
+
70
+ def getPayload(self):
71
+ return self
72
+
73
+ # Return the combined text of all child nodes. This method only considers
74
+ # tokens which have been added to the parse tree.
75
+ # <p>
76
+ # Since tokens on hidden channels (e.g. whitespace or comments) are not
77
+ # added to the parse trees, they will not appear in the output of this
78
+ # method.
79
+ #/
80
+ def getText(self):
81
+ if self.getChildCount() == 0:
82
+ return ""
83
+ with StringIO() as builder:
84
+ for child in self.getChildren():
85
+ builder.write(child.getText())
86
+ return builder.getvalue()
87
+
88
+ def getRuleIndex(self):
89
+ return -1
90
+
91
+ # For rule associated with this parse tree internal node, return
92
+ # the outer alternative number used to match the input. Default
93
+ # implementation does not compute nor store this alt num. Create
94
+ # a subclass of ParserRuleContext with backing field and set
95
+ # option contextSuperClass.
96
+ # to set it.
97
+ def getAltNumber(self):
98
+ return 0 # should use ATN.INVALID_ALT_NUMBER but won't compile
99
+
100
+ # Set the outer alternative number for this context node. Default
101
+ # implementation does nothing to avoid backing field overhead for
102
+ # trees that don't need it. Create
103
+ # a subclass of ParserRuleContext with backing field and set
104
+ # option contextSuperClass.
105
+ def setAltNumber(self, altNumber:int):
106
+ pass
107
+
108
+ def getChild(self, i:int):
109
+ return None
110
+
111
+ def getChildCount(self):
112
+ return 0
113
+
114
+ def getChildren(self):
115
+ for c in []:
116
+ yield c
117
+
118
+ def accept(self, visitor:ParseTreeVisitor):
119
+ return visitor.visitChildren(self)
120
+
121
+ # # Call this method to view a parse tree in a dialog box visually.#/
122
+ # public Future<JDialog> inspect(@Nullable Parser parser) {
123
+ # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
124
+ # return inspect(ruleNames);
125
+ # }
126
+ #
127
+ # public Future<JDialog> inspect(@Nullable List<String> ruleNames) {
128
+ # TreeViewer viewer = new TreeViewer(ruleNames, this);
129
+ # return viewer.open();
130
+ # }
131
+ #
132
+ # # Save this tree in a postscript file#/
133
+ # public void save(@Nullable Parser parser, String fileName)
134
+ # throws IOException, PrintException
135
+ # {
136
+ # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
137
+ # save(ruleNames, fileName);
138
+ # }
139
+ #
140
+ # # Save this tree in a postscript file using a particular font name and size#/
141
+ # public void save(@Nullable Parser parser, String fileName,
142
+ # String fontName, int fontSize)
143
+ # throws IOException
144
+ # {
145
+ # List<String> ruleNames = parser != null ? Arrays.asList(parser.getRuleNames()) : null;
146
+ # save(ruleNames, fileName, fontName, fontSize);
147
+ # }
148
+ #
149
+ # # Save this tree in a postscript file#/
150
+ # public void save(@Nullable List<String> ruleNames, String fileName)
151
+ # throws IOException, PrintException
152
+ # {
153
+ # Trees.writePS(this, ruleNames, fileName);
154
+ # }
155
+ #
156
+ # # Save this tree in a postscript file using a particular font name and size#/
157
+ # public void save(@Nullable List<String> ruleNames, String fileName,
158
+ # String fontName, int fontSize)
159
+ # throws IOException
160
+ # {
161
+ # Trees.writePS(this, ruleNames, fileName, fontName, fontSize);
162
+ # }
163
+ #
164
+ # # Print out a whole tree, not just a node, in LISP format
165
+ # # (root child1 .. childN). Print just a node if this is a leaf.
166
+ # # We have to know the recognizer so we can get rule names.
167
+ # #/
168
+ # @Override
169
+ # public String toStringTree(@Nullable Parser recog) {
170
+ # return Trees.toStringTree(this, recog);
171
+ # }
172
+ #
173
+ # Print out a whole tree, not just a node, in LISP format
174
+ # (root child1 .. childN). Print just a node if this is a leaf.
175
+ #
176
+ def toStringTree(self, ruleNames:list=None, recog:Parser=None):
177
+ return Trees.toStringTree(self, ruleNames=ruleNames, recog=recog)
178
+ # }
179
+ #
180
+ # @Override
181
+ # public String toStringTree() {
182
+ # return toStringTree((List<String>)null);
183
+ # }
184
+ #
185
+ def __str__(self):
186
+ return self.toString(None, None)
187
+
188
+ # @Override
189
+ # public String toString() {
190
+ # return toString((List<String>)null, (RuleContext)null);
191
+ # }
192
+ #
193
+ # public final String toString(@Nullable Recognizer<?,?> recog) {
194
+ # return toString(recog, ParserRuleContext.EMPTY);
195
+ # }
196
+ #
197
+ # public final String toString(@Nullable List<String> ruleNames) {
198
+ # return toString(ruleNames, null);
199
+ # }
200
+ #
201
+ # // recog null unless ParserRuleContext, in which case we use subclass toString(...)
202
+ # public String toString(@Nullable Recognizer<?,?> recog, @Nullable RuleContext stop) {
203
+ # String[] ruleNames = recog != null ? recog.getRuleNames() : null;
204
+ # List<String> ruleNamesList = ruleNames != null ? Arrays.asList(ruleNames) : null;
205
+ # return toString(ruleNamesList, stop);
206
+ # }
207
+
208
+ def toString(self, ruleNames:list, stop:RuleContext)->str:
209
+ with StringIO() as buf:
210
+ p = self
211
+ buf.write("[")
212
+ while p is not None and p is not stop:
213
+ if ruleNames is None:
214
+ if not p.isEmpty():
215
+ buf.write(str(p.invokingState))
216
+ else:
217
+ ri = p.getRuleIndex()
218
+ ruleName = ruleNames[ri] if ri >= 0 and ri < len(ruleNames) else str(ri)
219
+ buf.write(ruleName)
220
+
221
+ if p.parentCtx is not None and (ruleNames is not None or not p.parentCtx.isEmpty()):
222
+ buf.write(" ")
223
+
224
+ p = p.parentCtx
225
+
226
+ buf.write("]")
227
+ return buf.getvalue()
evalkit_tf437/lib/python3.10/site-packages/antlr4/Token.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #
5
+
6
+ # A token has properties: text, type, line, character position in the line
7
+ # (so we can ignore tabs), token channel, index, and source from which
8
+ # we obtained this token.
9
+ from io import StringIO
10
+
11
+
12
+ class Token (object):
13
+ __slots__ = ('source', 'type', 'channel', 'start', 'stop', 'tokenIndex', 'line', 'column', '_text')
14
+
15
+ INVALID_TYPE = 0
16
+
17
+ # During lookahead operations, this "token" signifies we hit rule end ATN state
18
+ # and did not follow it despite needing to.
19
+ EPSILON = -2
20
+
21
+ MIN_USER_TOKEN_TYPE = 1
22
+
23
+ EOF = -1
24
+
25
+ # All tokens go to the parser (unless skip() is called in that rule)
26
+ # on a particular "channel". The parser tunes to a particular channel
27
+ # so that whitespace etc... can go to the parser on a "hidden" channel.
28
+
29
+ DEFAULT_CHANNEL = 0
30
+
31
+ # Anything on different channel than DEFAULT_CHANNEL is not parsed
32
+ # by parser.
33
+
34
+ HIDDEN_CHANNEL = 1
35
+
36
+ def __init__(self):
37
+ self.source = None
38
+ self.type = None # token type of the token
39
+ self.channel = None # The parser ignores everything not on DEFAULT_CHANNEL
40
+ self.start = None # optional; return -1 if not implemented.
41
+ self.stop = None # optional; return -1 if not implemented.
42
+ self.tokenIndex = None # from 0..n-1 of the token object in the input stream
43
+ self.line = None # line=1..n of the 1st character
44
+ self.column = None # beginning of the line at which it occurs, 0..n-1
45
+ self._text = None # text of the token.
46
+
47
+ @property
48
+ def text(self):
49
+ return self._text
50
+
51
+ # Explicitly set the text for this token. If {code text} is not
52
+ # {@code null}, then {@link #getText} will return this value rather than
53
+ # extracting the text from the input.
54
+ #
55
+ # @param text The explicit text of the token, or {@code null} if the text
56
+ # should be obtained from the input along with the start and stop indexes
57
+ # of the token.
58
+
59
+ @text.setter
60
+ def text(self, text:str):
61
+ self._text = text
62
+
63
+
64
+ def getTokenSource(self):
65
+ return self.source[0]
66
+
67
+ def getInputStream(self):
68
+ return self.source[1]
69
+
70
+ class CommonToken(Token):
71
+
72
+ # An empty {@link Pair} which is used as the default value of
73
+ # {@link #source} for tokens that do not have a source.
74
+ EMPTY_SOURCE = (None, None)
75
+
76
+ def __init__(self, source:tuple = EMPTY_SOURCE, type:int = None, channel:int=Token.DEFAULT_CHANNEL, start:int=-1, stop:int=-1):
77
+ super().__init__()
78
+ self.source = source
79
+ self.type = type
80
+ self.channel = channel
81
+ self.start = start
82
+ self.stop = stop
83
+ self.tokenIndex = -1
84
+ if source[0] is not None:
85
+ self.line = source[0].line
86
+ self.column = source[0].column
87
+ else:
88
+ self.column = -1
89
+
90
+ # Constructs a new {@link CommonToken} as a copy of another {@link Token}.
91
+ #
92
+ # <p>
93
+ # If {@code oldToken} is also a {@link CommonToken} instance, the newly
94
+ # constructed token will share a reference to the {@link #text} field and
95
+ # the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will
96
+ # be assigned the result of calling {@link #getText}, and {@link #source}
97
+ # will be constructed from the result of {@link Token#getTokenSource} and
98
+ # {@link Token#getInputStream}.</p>
99
+ #
100
+ # @param oldToken The token to copy.
101
+ #
102
+ def clone(self):
103
+ t = CommonToken(self.source, self.type, self.channel, self.start, self.stop)
104
+ t.tokenIndex = self.tokenIndex
105
+ t.line = self.line
106
+ t.column = self.column
107
+ t.text = self.text
108
+ return t
109
+
110
+ @property
111
+ def text(self):
112
+ if self._text is not None:
113
+ return self._text
114
+ input = self.getInputStream()
115
+ if input is None:
116
+ return None
117
+ n = input.size
118
+ if self.start < n and self.stop < n:
119
+ return input.getText(self.start, self.stop)
120
+ else:
121
+ return "<EOF>"
122
+
123
+ @text.setter
124
+ def text(self, text:str):
125
+ self._text = text
126
+
127
+ def __str__(self):
128
+ with StringIO() as buf:
129
+ buf.write("[@")
130
+ buf.write(str(self.tokenIndex))
131
+ buf.write(",")
132
+ buf.write(str(self.start))
133
+ buf.write(":")
134
+ buf.write(str(self.stop))
135
+ buf.write("='")
136
+ txt = self.text
137
+ if txt is not None:
138
+ txt = txt.replace("\n","\\n")
139
+ txt = txt.replace("\r","\\r")
140
+ txt = txt.replace("\t","\\t")
141
+ else:
142
+ txt = "<no text>"
143
+ buf.write(txt)
144
+ buf.write("',<")
145
+ buf.write(str(self.type))
146
+ buf.write(">")
147
+ if self.channel > 0:
148
+ buf.write(",channel=")
149
+ buf.write(str(self.channel))
150
+ buf.write(",")
151
+ buf.write(str(self.line))
152
+ buf.write(":")
153
+ buf.write(str(self.column))
154
+ buf.write("]")
155
+ return buf.getvalue()
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/BufferedTokenStream.cpython-310.pyc ADDED
Binary file (7.02 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/CommonTokenStream.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/FileStream.cpython-310.pyc ADDED
Binary file (983 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/InputStream.cpython-310.pyc ADDED
Binary file (2.75 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/IntervalSet.cpython-310.pyc ADDED
Binary file (4.71 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/LL1Analyzer.cpython-310.pyc ADDED
Binary file (3.27 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/Lexer.cpython-310.pyc ADDED
Binary file (7.71 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/ParserInterpreter.cpython-310.pyc ADDED
Binary file (5.04 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/ParserRuleContext.cpython-310.pyc ADDED
Binary file (4.65 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/PredictionContext.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/RuleContext.cpython-310.pyc ADDED
Binary file (3.68 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/TokenStreamRewriter.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/__pycache__/Utils.cpython-310.pyc ADDED
Binary file (869 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/ATN.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+ from antlr4.IntervalSet import IntervalSet
6
+
7
+ from antlr4.RuleContext import RuleContext
8
+
9
+ from antlr4.Token import Token
10
+ from antlr4.atn.ATNType import ATNType
11
+ from antlr4.atn.ATNState import ATNState, DecisionState
12
+
13
+
14
+ class ATN(object):
15
+ __slots__ = (
16
+ 'grammarType', 'maxTokenType', 'states', 'decisionToState',
17
+ 'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
18
+ 'ruleToTokenType', 'lexerActions', 'modeToStartState'
19
+ )
20
+
21
+ INVALID_ALT_NUMBER = 0
22
+
23
+ # Used for runtime deserialization of ATNs from strings#/
24
+ def __init__(self, grammarType:ATNType , maxTokenType:int ):
25
+ # The type of the ATN.
26
+ self.grammarType = grammarType
27
+ # The maximum value for any symbol recognized by a transition in the ATN.
28
+ self.maxTokenType = maxTokenType
29
+ self.states = []
30
+ # Each subrule/rule is a decision point and we must track them so we
31
+ # can go back later and build DFA predictors for them. This includes
32
+ # all the rules, subrules, optional blocks, ()+, ()* etc...
33
+ self.decisionToState = []
34
+ # Maps from rule index to starting state number.
35
+ self.ruleToStartState = []
36
+ # Maps from rule index to stop state number.
37
+ self.ruleToStopState = None
38
+ self.modeNameToStartState = dict()
39
+ # For lexer ATNs, this maps the rule index to the resulting token type.
40
+ # For parser ATNs, this maps the rule index to the generated bypass token
41
+ # type if the
42
+ # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
43
+ # deserialization option was specified; otherwise, this is {@code null}.
44
+ self.ruleToTokenType = None
45
+ # For lexer ATNs, this is an array of {@link LexerAction} objects which may
46
+ # be referenced by action transitions in the ATN.
47
+ self.lexerActions = None
48
+ self.modeToStartState = []
49
+
50
+ # Compute the set of valid tokens that can occur starting in state {@code s}.
51
+ # If {@code ctx} is null, the set of tokens will not include what can follow
52
+ # the rule surrounding {@code s}. In other words, the set will be
53
+ # restricted to tokens reachable staying within {@code s}'s rule.
54
+ def nextTokensInContext(self, s:ATNState, ctx:RuleContext):
55
+ from antlr4.LL1Analyzer import LL1Analyzer
56
+ anal = LL1Analyzer(self)
57
+ return anal.LOOK(s, ctx=ctx)
58
+
59
+ # Compute the set of valid tokens that can occur starting in {@code s} and
60
+ # staying in same rule. {@link Token#EPSILON} is in set if we reach end of
61
+ # rule.
62
+ def nextTokensNoContext(self, s:ATNState):
63
+ if s.nextTokenWithinRule is not None:
64
+ return s.nextTokenWithinRule
65
+ s.nextTokenWithinRule = self.nextTokensInContext(s, None)
66
+ s.nextTokenWithinRule.readonly = True
67
+ return s.nextTokenWithinRule
68
+
69
+ def nextTokens(self, s:ATNState, ctx:RuleContext = None):
70
+ if ctx==None:
71
+ return self.nextTokensNoContext(s)
72
+ else:
73
+ return self.nextTokensInContext(s, ctx)
74
+
75
+ def addState(self, state:ATNState):
76
+ if state is not None:
77
+ state.atn = self
78
+ state.stateNumber = len(self.states)
79
+ self.states.append(state)
80
+
81
+ def removeState(self, state:ATNState):
82
+ self.states[state.stateNumber] = None # just free mem, don't shift states in list
83
+
84
+ def defineDecisionState(self, s:DecisionState):
85
+ self.decisionToState.append(s)
86
+ s.decision = len(self.decisionToState)-1
87
+ return s.decision
88
+
89
+ def getDecisionState(self, decision:int):
90
+ if len(self.decisionToState)==0:
91
+ return None
92
+ else:
93
+ return self.decisionToState[decision]
94
+
95
+ # Computes the set of input symbols which could follow ATN state number
96
+ # {@code stateNumber} in the specified full {@code context}. This method
97
+ # considers the complete parser context, but does not evaluate semantic
98
+ # predicates (i.e. all predicates encountered during the calculation are
99
+ # assumed true). If a path in the ATN exists from the starting state to the
100
+ # {@link RuleStopState} of the outermost context without matching any
101
+ # symbols, {@link Token#EOF} is added to the returned set.
102
+ #
103
+ # <p>If {@code context} is {@code null}, it is treated as
104
+ # {@link ParserRuleContext#EMPTY}.</p>
105
+ #
106
+ # @param stateNumber the ATN state number
107
+ # @param context the full parse context
108
+ # @return The set of potentially valid input symbols which could follow the
109
+ # specified state in the specified context.
110
+ # @throws IllegalArgumentException if the ATN does not contain a state with
111
+ # number {@code stateNumber}
112
+ #/
113
+ def getExpectedTokens(self, stateNumber:int, ctx:RuleContext ):
114
+ if stateNumber < 0 or stateNumber >= len(self.states):
115
+ raise Exception("Invalid state number.")
116
+ s = self.states[stateNumber]
117
+ following = self.nextTokens(s)
118
+ if Token.EPSILON not in following:
119
+ return following
120
+ expected = IntervalSet()
121
+ expected.addSet(following)
122
+ expected.removeOne(Token.EPSILON)
123
+ while (ctx != None and ctx.invokingState >= 0 and Token.EPSILON in following):
124
+ invokingState = self.states[ctx.invokingState]
125
+ rt = invokingState.transitions[0]
126
+ following = self.nextTokens(rt.followState)
127
+ expected.addSet(following)
128
+ expected.removeOne(Token.EPSILON)
129
+ ctx = ctx.parentCtx
130
+ if Token.EPSILON in following:
131
+ expected.addOne(Token.EOF)
132
+ return expected
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/PredictionMode.py ADDED
@@ -0,0 +1,499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+ #
7
+ # This enumeration defines the prediction modes available in ANTLR 4 along with
8
+ # utility methods for analyzing configuration sets for conflicts and/or
9
+ # ambiguities.
10
+
11
+
12
+ from enum import Enum
13
+ from antlr4.atn.ATN import ATN
14
+ from antlr4.atn.ATNConfig import ATNConfig
15
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
16
+ from antlr4.atn.ATNState import RuleStopState
17
+ from antlr4.atn.SemanticContext import SemanticContext
18
+
19
+ PredictionMode = None
20
+
21
class PredictionMode(Enum):
    """This enumeration defines the prediction modes available in ANTLR 4,
    along with utility class methods for analyzing configuration sets for
    conflicts and/or ambiguities.
    """

    # The SLL(*) prediction mode. Ignores the current parser context when
    # making predictions; the fastest mode. The parser either returns the
    # same parse tree the LL mode would, or reports a syntax error (which may
    # indicate the grammar/input combination actually needs full LL). No
    # guarantees are made for syntactically-incorrect inputs.
    SLL = 0

    # The LL(*) prediction mode. Uses the current parser context to resolve
    # SLL conflicts during prediction. The fastest mode that guarantees
    # correct parse results for all combinations of grammars with
    # syntactically correct inputs; for truly ambiguous grammars it might not
    # report exactly which alternatives are ambiguous.
    LL = 1

    # The LL(*) prediction mode with exact ambiguity detection. In addition
    # to the LL correctness guarantees, computes the complete and exact set
    # of ambiguous alternatives for every ambiguous decision. Useful for
    # grammar development; avoid when the exact sets are not needed, due to
    # the extra overhead of computing them.
    LL_EXACT_AMBIG_DETECTION = 2

    @classmethod
    def hasSLLConflictTerminatingPrediction(cls, mode:PredictionMode, configs:ATNConfigSet):
        """Computes the SLL prediction termination condition (for both the
        usual SLL+LL fallback and pure SLL without LL fallback).

        SLL (for combined SLL+LL parsing) stops when it sees only conflicting
        configuration subsets; full LL keeps going when there is uncertainty.
        As a heuristic, prediction continues whenever some state is
        associated with just one alternative, since more lookahead may still
        resolve the decision via that alternative.

        Semantic predicates are ignored for termination: in pure SLL mode,
        configurations differing only by predicate are first merged
        (predicates stripped into a copy) so the usual heuristic applies.

        :param mode: the prediction mode in use
        :param configs: the current ATN configuration set
        :return: True if SLL prediction must terminate, False to keep looking
        """
        # Configs in rule stop states indicate reaching the end of the decision
        # rule (local context) or end of start rule (full context). If all
        # configs meet this condition, then none of the configurations is able
        # to match additional input so we terminate prediction.
        if cls.allConfigsInRuleStopStates(configs):
            return True

        # pure SLL mode parsing
        if mode == PredictionMode.SLL:
            # Don't bother with combining configs from different semantic
            # contexts if we can fail over to full LL; costs more time
            # since we'll often fail over anyway.
            if configs.hasSemanticContext:
                # dup configs, tossing out semantic predicates
                dup = ATNConfigSet()
                for c in configs:
                    c = ATNConfig(config=c, semantic=SemanticContext.NONE)
                    dup.add(c)
                configs = dup
            # now we have combined contexts for configs with dissimilar preds

        # pure SLL or combined SLL+LL mode parsing
        altsets = cls.getConflictingAltSubsets(configs)
        return cls.hasConflictingAltSet(altsets) and not cls.hasStateAssociatedWithOneAlt(configs)

    @classmethod
    def hasConfigInRuleStopState(cls, configs:ATNConfigSet):
        """Return True if any configuration in ``configs`` is in a
        RuleStopState, i.e. has reached the end of the decision rule (local
        context) or the end of the start rule (full context)."""
        return any(isinstance(cfg.state, RuleStopState) for cfg in configs)

    @classmethod
    def allConfigsInRuleStopStates(cls, configs:ATNConfigSet):
        """Return True if every configuration in ``configs`` is in a
        RuleStopState (vacuously True for an empty set)."""
        return all(isinstance(cfg.state, RuleStopState) for cfg in configs)

    @classmethod
    def resolvesToJustOneViableAlt(cls, altsets:list):
        """Full LL prediction termination check.

        Returns the unique minimum alternative shared by every conflicting
        subset in ``altsets`` (prediction can stop: no amount of further
        lookahead changes the outcome), or ATN.INVALID_ALT_NUMBER when the
        subsets resolve to different minimum alternatives and more lookahead
        is required."""
        return cls.getSingleViableAlt(altsets)

    @classmethod
    def allSubsetsConflict(cls, altsets:list):
        """Return True if every alternative subset in ``altsets`` contains
        more than one alternative."""
        return not cls.hasNonConflictingAltSet(altsets)

    @classmethod
    def hasNonConflictingAltSet(cls, altsets:list):
        """Return True if any subset in ``altsets`` contains exactly one
        alternative."""
        return any(len(alts) == 1 for alts in altsets)

    @classmethod
    def hasConflictingAltSet(cls, altsets:list):
        """Return True if any subset in ``altsets`` contains more than one
        alternative."""
        return any(len(alts) > 1 for alts in altsets)

    @classmethod
    def allSubsetsEqual(cls, altsets:list):
        """Return True if every alternative subset in ``altsets`` equals the
        others (vacuously True for an empty collection)."""
        if not altsets:
            return True
        first = next(iter(altsets))
        return all(alts == first for alts in iter(altsets))

    @classmethod
    def getUniqueAlt(cls, altsets:list):
        """Return the unique alternative predicted by all subsets in
        ``altsets``, or ATN.INVALID_ALT_NUMBER if zero or several distinct
        alternatives are represented."""
        # local renamed from 'all', which shadowed the builtin
        represented = cls.getAlts(altsets)
        if len(represented) == 1:
            return next(iter(represented))
        return ATN.INVALID_ALT_NUMBER

    @classmethod
    def getAlts(cls, altsets:list):
        """Return the union of all alternative subsets in ``altsets``.

        Uses ``set().union(*altsets)`` rather than the original
        ``set.union(*altsets)``, which raised TypeError when ``altsets`` was
        empty (unbound method called with no arguments); an empty collection
        now yields an empty set."""
        return set().union(*altsets)

    @classmethod
    def getConflictingAltSubsets(cls, configs:ATNConfigSet):
        """Group the alts of ``configs`` into conflicting subsets: for each
        configuration c, map[(c.state, c.context)] U= c.alt (alt and
        predicate are deliberately excluded from the grouping key)."""
        configToAlts = dict()
        for c in configs:
            # NOTE(review): upstream keys on the *hash* of (state, context),
            # so distinct pairs with colliding hashes would be merged.
            key = hash((c.state.stateNumber, c.context))
            configToAlts.setdefault(key, set()).add(c.alt)
        return configToAlts.values()

    @classmethod
    def getStateToAltMap(cls, configs:ATNConfigSet):
        """Return a map from ATN state to the set of alts reaching it:
        for each configuration c, map[c.state] U= c.alt."""
        m = dict()
        for c in configs:
            m.setdefault(c.state, set()).add(c.alt)
        return m

    @classmethod
    def hasStateAssociatedWithOneAlt(cls, configs:ATNConfigSet):
        """Return True if some ATN state in ``configs`` is associated with
        exactly one alternative."""
        return any(len(alts) == 1 for alts in cls.getStateToAltMap(configs).values())

    @classmethod
    def getSingleViableAlt(cls, altsets:list):
        """Reduce each subset to its minimum alternative; return that single
        viable alternative if all subsets agree, else ATN.INVALID_ALT_NUMBER.

        NOTE(review): an empty ``altsets`` raises ValueError on the final
        ``min`` — preserved from the original; callers appear to guarantee a
        non-empty collection (confirm against ParserATNSimulator).
        """
        viableAlts = set()
        for alts in altsets:
            viableAlts.add(min(alts))
            if len(viableAlts) > 1:  # more than 1 viable alt
                return ATN.INVALID_ALT_NUMBER
        return min(viableAlts)
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNConfig.cpython-310.pyc ADDED
Binary file (4.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNConfigSet.cpython-310.pyc ADDED
Binary file (6.22 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNSimulator.cpython-310.pyc ADDED
Binary file (1.16 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/ATNType.cpython-310.pyc ADDED
Binary file (582 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/PredictionMode.cpython-310.pyc ADDED
Binary file (4.95 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/atn/__pycache__/SemanticContext.cpython-310.pyc ADDED
Binary file (7.04 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/DFASerializer.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # A DFA walker that knows how to dump them to serialized strings.#/
8
+ from io import StringIO
9
+ from antlr4 import DFA
10
+ from antlr4.Utils import str_list
11
+ from antlr4.dfa.DFAState import DFAState
12
+
13
+
14
class DFASerializer(object):
    """A DFA walker that knows how to dump a DFA to a serialized string."""

    __slots__ = ('dfa', 'literalNames', 'symbolicNames')

    def __init__(self, dfa:DFA, literalNames:list=None, symbolicNames:list=None):
        self.dfa = dfa
        self.literalNames = literalNames
        self.symbolicNames = symbolicNames

    def __str__(self):
        # NOTE(review): mirrors the Java port — yields None (not "") for an
        # empty DFA, so calling str() on this object would then raise; the
        # behavior is preserved for compatibility with existing callers.
        if self.dfa.s0 is None:
            return None
        with StringIO() as buf:
            for src in self.dfa.sortedStates():
                edgeCount = 0 if src.edges is None else len(src.edges)
                for symbol in range(edgeCount):
                    target = src.edges[symbol]
                    # 0x7FFFFFFF marks the sentinel "error" state; skip it.
                    if target is None or target.stateNumber == 0x7FFFFFFF:
                        continue
                    buf.write(self.getStateString(src))
                    buf.write("-")
                    buf.write(self.getEdgeLabel(symbol))
                    buf.write("->")
                    buf.write(self.getStateString(target))
                    buf.write('\n')
            output = buf.getvalue()
        return output if len(output) > 0 else None

    def getEdgeLabel(self, i:int):
        """Render edge index ``i`` as a token name; index 0 is EOF and token
        indexes are shifted up by one."""
        if i == 0:
            return "EOF"
        for names in (self.literalNames, self.symbolicNames):
            if names is not None and i <= len(names):
                return names[i - 1]
        return str(i - 1)

    def getStateString(self, s:DFAState):
        """Render a DFA state: ':' prefix for accept states, '^' suffix for
        full-context states, and '=>' with the prediction or predicate list
        for accept states."""
        acceptMark = ":" if s.isAcceptState else ""
        contextMark = "^" if s.requiresFullContext else ""
        baseStateStr = acceptMark + "s" + str(s.stateNumber) + contextMark
        if not s.isAcceptState:
            return baseStateStr
        if s.predicates is not None:
            return baseStateStr + "=>" + str_list(s.predicates)
        return baseStateStr + "=>" + str(s.prediction)
66
+
67
class LexerDFASerializer(DFASerializer):
    """DFA serializer for lexer ATNs, where edge indexes are code points."""

    def __init__(self, dfa:DFA):
        # Lexers have no literal/symbolic token name tables.
        super().__init__(dfa, None)

    def getEdgeLabel(self, i:int):
        # Label the edge directly with the quoted input character.
        return "'" + chr(i) + "'"
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/DFAState.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #/
6
+
7
+ # Map a predicate to a predicted alternative.#/
8
+ from io import StringIO
9
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
10
+ from antlr4.atn.SemanticContext import SemanticContext
11
+
12
+
13
class PredPrediction(object):
    """Maps a semantic predicate to a predicted alternative."""

    __slots__ = ('alt', 'pred')

    def __init__(self, pred:SemanticContext, alt:int):
        self.alt = alt
        self.pred = pred

    def __str__(self):
        # Same "(pred, alt)" rendering as the original concatenation form.
        return "({}, {})".format(self.pred, self.alt)
22
+
23
+ # A DFA state represents a set of possible ATN configurations.
24
+ # As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
25
+ # to keep track of all possible states the ATN can be in after
26
+ # reading each input symbol. That is to say, after reading
27
+ # input a1a2..an, the DFA is in a state that represents the
28
+ # subset T of the states of the ATN that are reachable from the
29
+ # ATN's start state along some path labeled a1a2..an."
30
+ # In conventional NFA&rarr;DFA conversion, therefore, the subset T
31
+ # would be a bitset representing the set of states the
32
+ # ATN could be in. We need to track the alt predicted by each
33
+ # state as well, however. More importantly, we need to maintain
34
+ # a stack of states, tracking the closure operations as they
35
+ # jump from rule to rule, emulating rule invocations (method calls).
36
+ # I have to add a stack to simulate the proper lookahead sequences for
37
+ # the underlying LL grammar from which the ATN was derived.
38
+ #
39
+ # <p>I use a set of ATNConfig objects not simple states. An ATNConfig
40
+ # is both a state (ala normal conversion) and a RuleContext describing
41
+ # the chain of rules (if any) followed to arrive at that state.</p>
42
+ #
43
+ # <p>A DFA state may have multiple references to a particular state,
44
+ # but with different ATN contexts (with same or different alts)
45
+ # meaning that state was reached via a different set of rule invocations.</p>
46
+ #/
47
class DFAState(object):
    """A DFA state represents a set of possible ATN configurations.

    As Aho, Sethi, Ullman p. 117 says: "The DFA uses its state to keep track
    of all possible states the ATN can be in after reading each input
    symbol."  Beyond the conventional subset-construction bitset, each state
    also tracks the alt predicted by each configuration and, via the
    ATNConfig contexts, a stack of rule invocations, so the DFA can emulate
    the proper lookahead sequences of the underlying LL grammar.

    A DFA state may reference a particular ATN state multiple times with
    different ATN contexts (same or different alts), meaning that state was
    reached via different sets of rule invocations.
    """
    __slots__ = (
        'stateNumber', 'configs', 'edges', 'isAcceptState', 'prediction',
        'lexerActionExecutor', 'requiresFullContext', 'predicates'
    )

    def __init__(self, stateNumber:int=-1, configs:ATNConfigSet=None):
        self.stateNumber = stateNumber
        # Fix for the shared-mutable-default pitfall: the original signature
        # was ``configs:ATNConfigSet=ATNConfigSet()`` — a single set instance
        # shared by every default-constructed DFAState, so mutating one
        # state's configs silently leaked into all the others. Each default
        # construction now gets a fresh, independent set.
        self.configs = ATNConfigSet() if configs is None else configs
        # edges[symbol] points to the target of symbol; shifted up by 1 so
        # that Token.EOF (-1) maps to edges[0].
        self.edges = None
        self.isAcceptState = False
        # If accept state, what ttype do we match or alt do we predict?
        # Set to ATN.INVALID_ALT_NUMBER when predicates is not None or
        # requiresFullContext is true.
        self.prediction = 0
        self.lexerActionExecutor = None
        # True when this state was created during SLL prediction that
        # discovered a conflict between the configurations in the state;
        # future ParserATNSimulator.execATN invocations then immediately jump
        # to full-context prediction.
        self.requiresFullContext = False
        # During SLL parsing: predicates associated with this state's ATN
        # configurations. Only used for non-requiresFullContext but
        # conflicting states (an ambiguity, not a conflict). Computed by
        # ParserATNSimulator.predicateDFAState; when not None, prediction is
        # ATN.INVALID_ALT_NUMBER.
        self.predicates = None

    def getAltSet(self):
        """Return the set of all alts mentioned by this state's ATN
        configurations, or None when that set would be empty (the original
        ``set(...) or None`` behavior is preserved for callers that test for
        None)."""
        if self.configs is not None:
            return set(cfg.alt for cfg in self.configs) or None
        return None

    def __hash__(self):
        # Hash mirrors __eq__: identity is determined by the config set.
        return hash(self.configs)

    def __eq__(self, other):
        """Two DFAState instances are equal iff their ATN configuration sets
        are equal. stateNumber is deliberately irrelevant: this is used by
        ParserATNSimulator.addDFAState to find an existing state with the
        same configurations."""
        if self is other:
            return True
        elif not isinstance(other, DFAState):
            return False
        else:
            return self.configs == other.configs

    def __str__(self):
        with StringIO() as buf:
            buf.write(str(self.stateNumber))
            buf.write(":")
            buf.write(str(self.configs))
            if self.isAcceptState:
                buf.write("=>")
                if self.predicates is not None:
                    buf.write(str(self.predicates))
                else:
                    buf.write(str(self.prediction))
            return buf.getvalue()
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __author__ = 'ericvergnaud'
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/DFA.cpython-310.pyc ADDED
Binary file (3.14 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/DFASerializer.cpython-310.pyc ADDED
Binary file (2.51 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/DFAState.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/dfa/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/error/DiagnosticErrorListener.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+
8
+ #
9
+ # This implementation of {@link ANTLRErrorListener} can be used to identify
10
+ # certain potential correctness and performance problems in grammars. "Reports"
11
+ # are made by calling {@link Parser#notifyErrorListeners} with the appropriate
12
+ # message.
13
+ #
14
+ # <ul>
15
+ # <li><b>Ambiguities</b>: These are cases where more than one path through the
16
+ # grammar can match the input.</li>
17
+ # <li><b>Weak context sensitivity</b>: These are cases where full-context
18
+ # prediction resolved an SLL conflict to a unique alternative which equaled the
19
+ # minimum alternative of the SLL conflict.</li>
20
+ # <li><b>Strong (forced) context sensitivity</b>: These are cases where the
21
+ # full-context prediction resolved an SLL conflict to a unique alternative,
22
+ # <em>and</em> the minimum alternative of the SLL conflict was found to not be
23
+ # a truly viable alternative. Two-stage parsing cannot be used for inputs where
24
+ # this situation occurs.</li>
25
+ # </ul>
26
+
27
+ from io import StringIO
28
+ from antlr4 import Parser, DFA
29
+ from antlr4.atn.ATNConfigSet import ATNConfigSet
30
+ from antlr4.error.ErrorListener import ErrorListener
31
+
32
class DiagnosticErrorListener(ErrorListener):
    """Error listener that surfaces potential grammar problems (ambiguities
    and context sensitivities) by reporting them through
    Parser.notifyErrorListeners with a descriptive message."""

    def __init__(self, exactOnly:bool=True):
        # When True, only exact ambiguities are reported; otherwise all.
        self.exactOnly = exactOnly

    def reportAmbiguity(self, recognizer:Parser, dfa:DFA, startIndex:int,
                       stopIndex:int, exact:bool, ambigAlts:set, configs:ATNConfigSet):
        if self.exactOnly and not exact:
            return
        alts = self.getConflictingAlts(ambigAlts, configs)
        text = recognizer.getTokenStream().getText(startIndex, stopIndex)
        msg = "reportAmbiguity d=" + self.getDecisionDescription(recognizer, dfa) \
              + ": ambigAlts=" + str(alts) + ", input='" + text + "'"
        recognizer.notifyErrorListeners(msg)

    def reportAttemptingFullContext(self, recognizer:Parser, dfa:DFA, startIndex:int,
                       stopIndex:int, conflictingAlts:set, configs:ATNConfigSet):
        text = recognizer.getTokenStream().getText(startIndex, stopIndex)
        msg = "reportAttemptingFullContext d=" + self.getDecisionDescription(recognizer, dfa) \
              + ", input='" + text + "'"
        recognizer.notifyErrorListeners(msg)

    def reportContextSensitivity(self, recognizer:Parser, dfa:DFA, startIndex:int,
                       stopIndex:int, prediction:int, configs:ATNConfigSet):
        text = recognizer.getTokenStream().getText(startIndex, stopIndex)
        msg = "reportContextSensitivity d=" + self.getDecisionDescription(recognizer, dfa) \
              + ", input='" + text + "'"
        recognizer.notifyErrorListeners(msg)

    def getDecisionDescription(self, recognizer:Parser, dfa:DFA):
        # "<decision>" alone, or "<decision> (<ruleName>)" when a usable
        # rule name is available for the decision's start state.
        decision = dfa.decision
        ruleIndex = dfa.atnStartState.ruleIndex
        ruleNames = recognizer.ruleNames
        if 0 <= ruleIndex < len(ruleNames):
            ruleName = ruleNames[ruleIndex]
            if ruleName is not None and len(ruleName) > 0:
                return str(decision) + " (" + ruleName + ")"
        return str(decision)

    #
    # Computes the set of conflicting or ambiguous alternatives from a
    # configuration set, if that information was not already provided by the
    # parser.
    #
    # @param reportedAlts The set of conflicting or ambiguous alternatives, as
    # reported by the parser.
    # @param configs The conflicting or ambiguous configuration set.
    # @return Returns reportedAlts if it is not None, otherwise the set of
    # alternatives represented in configs.
    #
    def getConflictingAlts(self, reportedAlts:set, configs:ATNConfigSet):
        if reportedAlts is not None:
            return reportedAlts
        return set(config.alt for config in configs)
evalkit_tf437/lib/python3.10/site-packages/antlr4/error/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __author__ = 'ericvergnaud'
evalkit_tf437/lib/python3.10/site-packages/antlr4/error/__pycache__/DiagnosticErrorListener.cpython-310.pyc ADDED
Binary file (2.81 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/error/__pycache__/ErrorListener.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/Chunk.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
class Chunk(object):
    """Base class for the pieces a tree pattern string is split into
    (see TagChunk and TextChunk)."""
9
+
10
class TagChunk(Chunk):
    """An "island" chunk: a ``<tag>`` or ``<label:tag>`` reference inside a
    tree pattern. ``tag`` names a token or rule; ``label`` is optional."""
    __slots__ = ('tag', 'label')

    def __init__(self, tag:str, label:str=None):
        self.tag = tag
        self.label = label

    def __str__(self):
        # "tag" when unlabeled, otherwise "label:tag".
        return self.tag if self.label is None else self.label + ":" + self.tag
22
+
23
class TextChunk(Chunk):
    """A "sea" chunk: a run of raw input text between tags in a tree
    pattern."""
    __slots__ = 'text'

    def __init__(self, text:str):
        self.text = text

    def __str__(self):
        # Quoted so text chunks are distinguishable from tags when printed.
        return "'" + self.text + "'"
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/ParseTreePattern.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A pattern like {@code <ID> = <expr>;} converted to a {@link ParseTree} by
9
+ # {@link ParseTreePatternMatcher#compile(String, int)}.
10
+ #
11
+ from antlr4.tree.ParseTreePatternMatcher import ParseTreePatternMatcher
12
+ from antlr4.tree.Tree import ParseTree
13
+ from antlr4.xpath.XPath import XPath
14
+
15
+
16
class ParseTreePattern(object):
    """A pattern like ``<ID> = <expr>;`` converted to a ParseTree by
    ParseTreePatternMatcher.compileTreePattern()."""
    __slots__ = ('matcher', 'patternRuleIndex', 'pattern', 'patternTree')

    # Construct a new instance of the ParseTreePattern class.
    #
    # @param matcher The ParseTreePatternMatcher which created this pattern.
    # @param pattern The tree pattern in concrete syntax form.
    # @param patternRuleIndex The parser rule serving as the pattern's root.
    # @param patternTree The tree pattern in ParseTree form.
    #
    def __init__(self, matcher:ParseTreePatternMatcher, pattern:str, patternRuleIndex:int , patternTree:ParseTree):
        self.matcher = matcher
        self.patternRuleIndex = patternRuleIndex
        self.pattern = pattern
        self.patternTree = patternTree

    # Match a specific parse tree against this tree pattern.
    #
    # @param tree The parse tree to match against this tree pattern.
    # @return A ParseTreeMatch describing the result; use succeeded() to
    # determine whether the match was successful.
    #
    def match(self, tree:ParseTree):
        # BUGFIX: previously called self.matcher.match(tree, self), but
        # ParseTreePatternMatcher defines no match(); the ParseTreeMatch-
        # returning entry point for a compiled pattern is matchPattern().
        return self.matcher.matchPattern(tree, self)

    # Determine whether or not a parse tree matches this tree pattern.
    #
    # @param tree The parse tree to match against this tree pattern.
    # @return True if tree is a match for this pattern, otherwise False.
    #
    def matches(self, tree:ParseTree):
        # BUGFIX: same as match() above — route through matchPattern().
        return self.matcher.matchPattern(tree, self).succeeded()

    # Find all nodes using XPath and then try to match those subtrees against
    # this tree pattern.
    #
    # @param tree The ParseTree to match against this pattern.
    # @param xpath An expression matching the nodes.
    #
    # @return The ParseTreeMatch objects of the successful matches;
    # unsuccessful matches are omitted regardless of the failure reason.
    #
    def findAll(self, tree:ParseTree, xpath:str):
        subtrees = XPath.findAll(tree, xpath, self.matcher.parser)
        candidates = (self.match(t) for t in subtrees)
        return [m for m in candidates if m.succeeded()]
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/ParseTreePatternMatcher.py ADDED
@@ -0,0 +1,374 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A tree pattern matching mechanism for ANTLR {@link ParseTree}s.
9
+ #
10
+ # <p>Patterns are strings of source input text with special tags representing
11
+ # token or rule references such as:</p>
12
+ #
13
+ # <p>{@code <ID> = <expr>;}</p>
14
+ #
15
+ # <p>Given a pattern start rule such as {@code statement}, this object constructs
16
+ # a {@link ParseTree} with placeholders for the {@code ID} and {@code expr}
17
+ # subtree. Then the {@link #match} routines can compare an actual
18
+ # {@link ParseTree} from a parse with this pattern. Tag {@code <ID>} matches
19
+ # any {@code ID} token and tag {@code <expr>} references the result of the
20
+ # {@code expr} rule (generally an instance of {@code ExprContext}.</p>
21
+ #
22
+ # <p>Pattern {@code x = 0;} is a similar pattern that matches the same pattern
23
+ # except that it requires the identifier to be {@code x} and the expression to
24
+ # be {@code 0}.</p>
25
+ #
26
+ # <p>The {@link #matches} routines return {@code true} or {@code false} based
27
+ # upon a match for the tree rooted at the parameter sent in. The
28
+ # {@link #match} routines return a {@link ParseTreeMatch} object that
29
+ # contains the parse tree, the parse tree pattern, and a map from tag name to
30
+ # matched nodes (more below). A subtree that fails to match, returns with
31
+ # {@link ParseTreeMatch#mismatchedNode} set to the first tree node that did not
32
+ # match.</p>
33
+ #
34
+ # <p>For efficiency, you can compile a tree pattern in string form to a
35
+ # {@link ParseTreePattern} object.</p>
36
+ #
37
+ # <p>See {@code TestParseTreeMatcher} for lots of examples.
38
+ # {@link ParseTreePattern} has two static helper methods:
39
+ # {@link ParseTreePattern#findAll} and {@link ParseTreePattern#match} that
40
+ # are easy to use but not super efficient because they create new
41
+ # {@link ParseTreePatternMatcher} objects each time and have to compile the
42
+ # pattern in string form before using it.</p>
43
+ #
44
+ # <p>The lexer and parser that you pass into the {@link ParseTreePatternMatcher}
45
+ # constructor are used to parse the pattern in string form. The lexer converts
46
+ # the {@code <ID> = <expr>;} into a sequence of four tokens (assuming lexer
47
+ # throws out whitespace or puts it on a hidden channel). Be aware that the
48
+ # input stream is reset for the lexer (but not the parser; a
49
+ # {@link ParserInterpreter} is created to parse the input.). Any user-defined
50
+ # fields you have put into the lexer might get changed when this mechanism asks
51
+ # it to scan the pattern string.</p>
52
+ #
53
+ # <p>Normally a parser does not accept token {@code <expr>} as a valid
54
+ # {@code expr} but, from the parser passed in, we create a special version of
55
+ # the underlying grammar representation (an {@link ATN}) that allows imaginary
56
+ # tokens representing rules ({@code <expr>}) to match entire rules. We call
57
+ # these <em>bypass alternatives</em>.</p>
58
+ #
59
+ # <p>Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
60
+ # by default, but you can set them to whatever you want using
61
+ # {@link #setDelimiters}. You must escape both start and stop strings
62
+ # {@code \<} and {@code \>}.</p>
63
+ #
64
+ from antlr4.CommonTokenStream import CommonTokenStream
65
+ from antlr4.InputStream import InputStream
66
+ from antlr4.ParserRuleContext import ParserRuleContext
67
+ from antlr4.Lexer import Lexer
68
+ from antlr4.ListTokenSource import ListTokenSource
69
+ from antlr4.Token import Token
70
+ from antlr4.error.ErrorStrategy import BailErrorStrategy
71
+ from antlr4.error.Errors import RecognitionException, ParseCancellationException
72
+ from antlr4.tree.Chunk import TagChunk, TextChunk
73
+ from antlr4.tree.RuleTagToken import RuleTagToken
74
+ from antlr4.tree.TokenTagToken import TokenTagToken
75
+ from antlr4.tree.Tree import ParseTree, TerminalNode, RuleNode
76
+
77
+ # need forward declaration
78
+ Parser = None
79
+ ParseTreePattern = None
80
+
81
class CannotInvokeStartRule(Exception):
    """Raised when parsing a tree pattern fails for a reason other than a
    recognition error; wraps the underlying exception as its argument."""

    def __init__(self, e:Exception):
        super().__init__(e)
85
+
86
class StartRuleDoesNotConsumeFullPattern(Exception):
    """Raised when the pattern's start rule stops before consuming the whole
    pattern token stream."""
89
+
90
+
91
class ParseTreePatternMatcher(object):
    """A tree pattern matching mechanism for ANTLR ParseTrees.

    Patterns are strings of source text containing tags such as
    ``<ID> = <expr>;``. compileTreePattern() converts a pattern into a
    ParseTreePattern whose tree has placeholders for tagged tokens and
    rules; the match*/matches* routines then compare an actual ParseTree
    against that pattern tree. Tag delimiters default to ``<`` and ``>``
    with ``\\`` as the escape string (see setDelimiters()).
    """
    __slots__ = ('lexer', 'parser', 'start', 'stop', 'escape')

    # Constructs a ParseTreePatternMatcher from a Lexer and Parser. The
    # lexer's input stream is replaced for tokenizing the raw-text chunks of
    # patterns; the parser supplies the grammar name plus token/rule names
    # and the ATN with bypass alternatives used to parse patterns.
    def __init__(self, lexer:Lexer, parser:Parser):
        self.lexer = lexer
        self.parser = parser
        self.start = "<"
        self.stop = ">"
        self.escape = "\\" # e.g., \< and \> must escape BOTH!

    # Set the delimiters used for marking rule and token tags within concrete
    # syntax used by the tree pattern parser.
    #
    # @param start The start delimiter.
    # @param stop The stop delimiter.
    # @param escapeLeft The escape sequence to use for escaping a start or stop delimiter.
    #
    # Raises Exception if start or stop is None or empty.
    def setDelimiters(self, start:str, stop:str, escapeLeft:str):
        if start is None or len(start)==0:
            raise Exception("start cannot be null or empty")
        if stop is None or len(stop)==0:
            raise Exception("stop cannot be null or empty")
        self.start = start
        self.stop = stop
        self.escape = escapeLeft

    # Does pattern, matched as rule patternRuleIndex, match tree?
    def matchesRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int):
        p = self.compileTreePattern(pattern, patternRuleIndex)
        # BUGFIX: previously called self.matches(tree, p), a method that does
        # not exist on this class; matchesPattern() is the boolean entry
        # point for an already-compiled pattern.
        return self.matchesPattern(tree, p)

    # Does the pre-compiled pattern match tree?
    def matchesPattern(self, tree:ParseTree, pattern:ParseTreePattern):
        mismatchedNode = self.matchImpl(tree, pattern.patternTree, dict())
        return mismatchedNode is None

    # Compare pattern, matched as rule patternRuleIndex, against tree and
    # return a ParseTreeMatch describing the matched elements or the node at
    # which the match failed.
    def matchRuleIndex(self, tree:ParseTree, pattern:str, patternRuleIndex:int):
        p = self.compileTreePattern(pattern, patternRuleIndex)
        return self.matchPattern(tree, p)

    # Same as matchRuleIndex(), but with a pre-compiled pattern.
    def matchPattern(self, tree:ParseTree, pattern:ParseTreePattern):
        labels = dict()
        mismatchedNode = self.matchImpl(tree, pattern.patternTree, labels)
        from antlr4.tree.ParseTreeMatch import ParseTreeMatch
        return ParseTreeMatch(tree, pattern, labels, mismatchedNode)

    # For repeated use of a tree pattern, compile it once to a
    # ParseTreePattern. Raises CannotInvokeStartRule or
    # StartRuleDoesNotConsumeFullPattern on failure.
    def compileTreePattern(self, pattern:str, patternRuleIndex:int):
        tokenList = self.tokenize(pattern)
        tokenSrc = ListTokenSource(tokenList)
        tokens = CommonTokenStream(tokenSrc)
        from antlr4.ParserInterpreter import ParserInterpreter
        parserInterp = ParserInterpreter(self.parser.grammarFileName, self.parser.tokenNames,
                               self.parser.ruleNames, self.parser.getATNWithBypassAlts(),tokens)
        tree = None
        try:
            parserInterp.setErrorHandler(BailErrorStrategy())
            tree = parserInterp.parse(patternRuleIndex)
        except ParseCancellationException as e:
            # The bail strategy wraps the real RecognitionException as cause.
            raise e.cause
        except RecognitionException as e:
            raise e
        except Exception as e:
            raise CannotInvokeStartRule(e)

        # Make sure tree pattern compilation checks for a complete parse
        if tokens.LA(1)!=Token.EOF:
            raise StartRuleDoesNotConsumeFullPattern()

        from antlr4.tree.ParseTreePattern import ParseTreePattern
        return ParseTreePattern(self, pattern, patternRuleIndex, tree)

    # Recursively walk tree against patternTree, filling labels.
    #
    # @return the first node encountered in tree which does not match a
    # corresponding node in patternTree, or None if the match was successful.
    def matchImpl(self, tree:ParseTree, patternTree:ParseTree, labels:dict):
        if tree is None:
            raise Exception("tree cannot be null")
        if patternTree is None:
            raise Exception("patternTree cannot be null")

        # x and <ID>, x and y, or x and x; or could be mismatched types
        if isinstance(tree, TerminalNode) and isinstance(patternTree, TerminalNode):
            mismatchedNode = None
            # both are tokens and they have same type
            if tree.symbol.type == patternTree.symbol.type:
                if isinstance(patternTree.symbol, TokenTagToken): # x and <ID>
                    tokenTagToken = patternTree.symbol
                    # track label->list-of-nodes for both token name and label (if any)
                    self.map(labels, tokenTagToken.tokenName, tree)
                    if tokenTagToken.label is not None:
                        self.map(labels, tokenTagToken.label, tree)
                elif tree.getText()==patternTree.getText():
                    # x and x
                    pass
                else:
                    # x and y
                    if mismatchedNode is None:
                        mismatchedNode = tree
            else:
                if mismatchedNode is None:
                    mismatchedNode = tree

            return mismatchedNode

        if isinstance(tree, ParserRuleContext) and isinstance(patternTree, ParserRuleContext):
            mismatchedNode = None
            # (expr ...) and <expr>
            ruleTagToken = self.getRuleTagToken(patternTree)
            if ruleTagToken is not None:
                if tree.ruleContext.ruleIndex == patternTree.ruleContext.ruleIndex:
                    # track label->list-of-nodes for both rule name and label (if any)
                    self.map(labels, ruleTagToken.ruleName, tree)
                    if ruleTagToken.label is not None:
                        self.map(labels, ruleTagToken.label, tree)
                else:
                    if mismatchedNode is None:
                        mismatchedNode = tree
                return mismatchedNode

            # (expr ...) and (expr ...)
            if tree.getChildCount()!=patternTree.getChildCount():
                if mismatchedNode is None:
                    mismatchedNode = tree
                return mismatchedNode

            n = tree.getChildCount()
            for i in range(0, n):
                childMatch = self.matchImpl(tree.getChild(i), patternTree.getChild(i), labels)
                if childMatch is not None:
                    return childMatch

            return mismatchedNode

        # if nodes aren't both tokens or both rule nodes, can't match
        return tree

    # Record tree under label, accumulating a list of nodes per label.
    def map(self, labels, label, tree):
        labels.setdefault(label, list()).append(tree)

    # Is t an (expr <expr>) subtree? If so, return its RuleTagToken.
    def getRuleTagToken(self, tree:ParseTree):
        if isinstance(tree, RuleNode):
            if tree.getChildCount()==1 and isinstance(tree.getChild(0), TerminalNode):
                c = tree.getChild(0)
                if isinstance(c.symbol, RuleTagToken):
                    return c.symbol
        return None

    # Convert a pattern string into a list of tokens: tag chunks become
    # TokenTagToken/RuleTagToken, text chunks are run through the lexer.
    def tokenize(self, pattern:str):
        # split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
        chunks = self.split(pattern)

        # create token stream from text and tags
        tokens = list()
        for chunk in chunks:
            if isinstance(chunk, TagChunk):
                # add special rule token or conjure up new token from name
                if chunk.tag[0].isupper():
                    ttype = self.parser.getTokenType(chunk.tag)
                    if ttype==Token.INVALID_TYPE:
                        raise Exception("Unknown token " + str(chunk.tag) + " in pattern: " + pattern)
                    tokens.append(TokenTagToken(chunk.tag, ttype, chunk.label))
                elif chunk.tag[0].islower():
                    ruleIndex = self.parser.getRuleIndex(chunk.tag)
                    if ruleIndex==-1:
                        raise Exception("Unknown rule " + str(chunk.tag) + " in pattern: " + pattern)
                    ruleImaginaryTokenType = self.parser.getATNWithBypassAlts().ruleToTokenType[ruleIndex]
                    tokens.append(RuleTagToken(chunk.tag, ruleImaginaryTokenType, chunk.label))
                else:
                    raise Exception("invalid tag: " + str(chunk.tag) + " in pattern: " + pattern)
            else:
                self.lexer.setInputStream(InputStream(chunk.text))
                t = self.lexer.nextToken()
                while t.type!=Token.EOF:
                    tokens.append(t)
                    t = self.lexer.nextToken()
        return tokens

    # Split e.g. "<ID> = <e:expr> ;" into 4 chunks for tokenizing by tokenize().
    def split(self, pattern:str):
        p = 0
        n = len(pattern)
        chunks = list()
        # find all start and stop indexes first, then collect
        starts = list()
        stops = list()
        while p < n :
            if p == pattern.find(self.escape + self.start, p):
                p += len(self.escape) + len(self.start)
            elif p == pattern.find(self.escape + self.stop, p):
                p += len(self.escape) + len(self.stop)
            elif p == pattern.find(self.start, p):
                starts.append(p)
                p += len(self.start)
            elif p == pattern.find(self.stop, p):
                stops.append(p)
                p += len(self.stop)
            else:
                p += 1

        nt = len(starts)

        if nt > len(stops):
            raise Exception("unterminated tag in pattern: " + pattern)
        if nt < len(stops):
            raise Exception("missing start tag in pattern: " + pattern)

        for i in range(0, nt):
            if starts[i] >= stops[i]:
                raise Exception("tag delimiters out of order in pattern: " + pattern)

        # collect into chunks now
        if nt==0:
            chunks.append(TextChunk(pattern))

        if nt>0 and starts[0]>0: # copy text up to first tag into chunks
            text = pattern[0:starts[0]]
            # BUGFIX: was chunks.add(...) -- Python lists have append(), not
            # add(); the original raised AttributeError for any pattern with
            # text before the first tag.
            chunks.append(TextChunk(text))

        for i in range(0, nt):
            # copy inside of <tag>
            tag = pattern[starts[i] + len(self.start) : stops[i]]
            ruleOrToken = tag
            label = None
            colon = tag.find(':')
            if colon >= 0:
                label = tag[0:colon]
                ruleOrToken = tag[colon+1 : len(tag)]
            # BUGFIX: TagChunk's signature is (tag, label); the original
            # passed (label, ruleOrToken), swapping the two fields so that
            # tokenize() saw the label as the tag name.
            chunks.append(TagChunk(ruleOrToken, label))
            if i+1 < len(starts):
                # copy from end of <tag> to start of next
                text = pattern[stops[i] + len(self.stop) : starts[i + 1]]
                chunks.append(TextChunk(text))

        if nt > 0 :
            afterLastTag = stops[nt - 1] + len(self.stop)
            if afterLastTag < n : # copy text from end of last tag to end
                text = pattern[afterLastTag : n]
                chunks.append(TextChunk(text))

        # strip out the escape sequences from text chunks but not tags
        for i in range(0, len(chunks)):
            c = chunks[i]
            if isinstance(c, TextChunk):
                unescaped = c.text.replace(self.escape, "")
                if len(unescaped) < len(c.text):
                    chunks[i] = TextChunk(unescaped)
        return chunks
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/RuleTagToken.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A {@link Token} object representing an entire subtree matched by a parser
9
+ # rule; e.g., {@code <expr>}. These tokens are created for {@link TagChunk}
10
+ # chunks where the tag corresponds to a parser rule.
11
+ #
12
+ from antlr4.Token import Token
13
+
14
+
15
class RuleTagToken(Token):
    """A Token object representing an entire subtree matched by a parser
    rule, e.g. ``<expr>``. Created for TagChunk chunks where the tag
    corresponds to a parser rule."""
    __slots__ = ('label', 'ruleName')

    # Constructs a new instance of RuleTagToken with the specified rule
    # name, bypass token type, and label.
    #
    # @param ruleName The name of the parser rule this rule tag matches.
    # @param bypassTokenType The bypass token type assigned to the parser rule.
    # @param label The label associated with the rule tag, or None if the
    # rule tag is unlabeled.
    #
    # Raises Exception if ruleName is None or empty.
    def __init__(self, ruleName:str, bypassTokenType:int, label:str=None):
        if ruleName is None or len(ruleName)==0:
            raise Exception("ruleName cannot be null or empty.")
        self.source = None
        self.type = bypassTokenType # token type of the token
        self.channel = Token.DEFAULT_CHANNEL # The parser ignores everything not on DEFAULT_CHANNEL
        self.start = -1 # optional; return -1 if not implemented.
        self.stop = -1 # optional; return -1 if not implemented.
        self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream
        self.line = 0 # line=1..n of the 1st character
        self.column = -1 # beginning of the line at which it occurs, 0..n-1
        self.label = label
        # BUGFIX: ruleName must be assigned before getText() is called below,
        # because getText() reads self.ruleName (and self.label). The
        # original assigned ruleName after self._text = self.getText(),
        # which raised AttributeError on every construction.
        self.ruleName = ruleName
        self._text = self.getText() # text of the token.

    def getText(self):
        # "<ruleName>" when unlabeled, otherwise "<label:ruleName>".
        if self.label is None:
            return "<" + self.ruleName + ">"
        else:
            return "<" + self.label + ":" + self.ruleName + ">"
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/TokenTagToken.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
3
+ # Use of this file is governed by the BSD 3-clause license that
4
+ # can be found in the LICENSE.txt file in the project root.
5
+ #
6
+
7
+ #
8
+ # A {@link Token} object representing a token of a particular type; e.g.,
9
+ # {@code <ID>}. These tokens are created for {@link TagChunk} chunks where the
10
+ # tag corresponds to a lexer rule or token type.
11
+ #
12
+ from antlr4.Token import CommonToken
13
+
14
+
15
class TokenTagToken(CommonToken):
    """A token standing for any token of a particular type, e.g. ``<ID>``.
    Created for TagChunk chunks where the tag corresponds to a lexer rule
    or token type."""
    __slots__ = ('tokenName', 'label')

    # Constructs a new instance with the specified token name, type, and
    # optional label (None when the token tag is unlabeled).
    def __init__(self, tokenName:str, type:int, label:str=None):
        super().__init__(type=type)
        self.tokenName = tokenName
        self.label = label
        self._text = self.getText()

    def getText(self):
        # Return the token tag formatted with '<' and '>' delimiters.
        core = self.tokenName if self.label is None else self.label + ":" + self.tokenName
        return "<" + core + ">"

    def __str__(self):
        # A string of the form "tokenName:type".
        return "{}:{}".format(self.tokenName, str(self.type))
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/Tree.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
2
+ # Use of this file is governed by the BSD 3-clause license that
3
+ # can be found in the LICENSE.txt file in the project root.
4
+ #/
5
+
6
+
7
+ # The basic notion of a tree has a parent, a payload, and a list of children.
8
+ # It is the most abstract interface for all the trees used by ANTLR.
9
+ #/
10
+ from antlr4.Token import Token
11
+
12
# Sentinel source interval returned when a node has no backing token;
# start (-1) > stop (-2) marks the interval as empty/invalid.
INVALID_INTERVAL = (-1, -2)
13
+
14
class Tree(object):
    """Most abstract tree marker interface used by all ANTLR trees."""
    pass
16
+
17
class SyntaxTree(Tree):
    """Marker interface for trees tied to a span of the input (a source interval)."""
    pass
19
+
20
class ParseTree(SyntaxTree):
    """Marker interface for nodes produced by a parse (rule and terminal nodes)."""
    pass
22
+
23
class RuleNode(ParseTree):
    """Marker interface for interior parse-tree nodes backed by a grammar rule."""
    pass
25
+
26
class TerminalNode(ParseTree):
    """Marker interface for leaf parse-tree nodes backed by a single token."""
    pass
28
+
29
class ErrorNode(TerminalNode):
    """Marker interface for leaf nodes created during error recovery."""
    pass
31
+
32
class ParseTreeVisitor(object):
    """Generic visitor over parse trees.

    Subclasses override the visitXxx hooks and the aggregation policy
    (defaultResult / aggregateResult / shouldVisitNextChild) to compute a
    value while walking the tree.
    """

    def visit(self, tree):
        # Double-dispatch through the node's accept() hook.
        return tree.accept(self)

    def visitChildren(self, node):
        # Fold the children's results together, stopping early when
        # shouldVisitNextChild vetoes continuing.
        aggregate = self.defaultResult()
        for idx in range(node.getChildCount()):
            if not self.shouldVisitNextChild(node, aggregate):
                break
            aggregate = self.aggregateResult(aggregate, node.getChild(idx).accept(self))
        return aggregate

    def visitTerminal(self, node):
        # Leaves contribute the default result unless overridden.
        return self.defaultResult()

    def visitErrorNode(self, node):
        # Error nodes contribute the default result unless overridden.
        return self.defaultResult()

    def defaultResult(self):
        # Base value of the fold; None by default.
        return None

    def aggregateResult(self, aggregate, nextResult):
        # Default policy: the last child's result wins.
        return nextResult

    def shouldVisitNextChild(self, node, currentResult):
        # Default policy: always keep visiting.
        return True
63
+
64
# Forward-declaration placeholder: ParserRuleContext is defined in a module
# that imports this one, so importing it here would create a cycle.  The name
# is bound to None only so the annotations below evaluate, then deleted.
ParserRuleContext = None

class ParseTreeListener(object):
    """Listener interface driven by ParseTreeWalker; every hook is a no-op
    by default so subclasses override only the events they care about."""

    def visitTerminal(self, node:TerminalNode):
        # Called once for every terminal (leaf) node encountered.
        pass

    def visitErrorNode(self, node:ErrorNode):
        # Called once for every node created during error recovery.
        pass

    def enterEveryRule(self, ctx:ParserRuleContext):
        # Called before the rule-specific enter event for each rule node.
        pass

    def exitEveryRule(self, ctx:ParserRuleContext):
        # Called after the rule-specific exit event for each rule node.
        pass

del ParserRuleContext
81
+
82
class TerminalNodeImpl(TerminalNode):
    """Concrete leaf parse-tree node wrapping a single Token.

    Holds the matched token (`symbol`) and a back-pointer to the enclosing
    rule context (`parentCtx`), which the parser fills in when the node is
    attached to the tree.
    """
    __slots__ = ('parentCtx', 'symbol')

    def __init__(self, symbol:Token):
        self.parentCtx = None
        self.symbol = symbol

    # NOTE: a previous no-op __setattr__ override (which only delegated to
    # super().__setattr__) was removed here; it added per-assignment call
    # overhead without changing behavior in any way.

    def getChild(self, i:int):
        # Terminals have no children.
        return None

    def getSymbol(self):
        return self.symbol

    def getParent(self):
        return self.parentCtx

    def getPayload(self):
        # The payload of a terminal node is its token.
        return self.symbol

    def getSourceInterval(self):
        # A terminal covers exactly its token's index in the token stream.
        if self.symbol is None:
            return INVALID_INTERVAL
        tokenIndex = self.symbol.tokenIndex
        return (tokenIndex, tokenIndex)

    def getChildCount(self):
        return 0

    def accept(self, visitor:ParseTreeVisitor):
        # Double-dispatch to the visitor's terminal hook.
        return visitor.visitTerminal(self)

    def getText(self):
        return self.symbol.text

    def __str__(self):
        if self.symbol.type == Token.EOF:
            return "<EOF>"
        else:
            return self.symbol.text
123
+
124
# Represents a token that was consumed during resynchronization
# rather than during a valid match operation. For example,
# we will create this kind of a node during single token insertion
# and deletion as well as during "consume until error recovery set"
# upon no viable alternative exceptions.

class ErrorNodeImpl(TerminalNodeImpl,ErrorNode):
    """Leaf node for a token consumed during error recovery; identical to a
    terminal node except that visitors dispatch to visitErrorNode."""

    def __init__(self, token:Token):
        super().__init__(token)

    def accept(self, visitor:ParseTreeVisitor):
        # Route to the error-node hook instead of the terminal hook.
        return visitor.visitErrorNode(self)
137
+
138
+
139
class ParseTreeWalker(object):
    """Depth-first walker that fires ParseTreeListener events over a tree."""

    DEFAULT = None  # shared singleton instance, assigned after the class body

    def walk(self, listener:ParseTreeListener, t:ParseTree):
        """
        Performs a walk on the given parse tree starting at the root and going down recursively
        with depth-first search. On each node, {@link ParseTreeWalker#enterRule} is called before
        recursively walking down into child nodes, then
        {@link ParseTreeWalker#exitRule} is called after the recursive call to wind up.
        @param listener The listener used by the walker to process grammar rules
        @param t The parse tree to be walked on
        """
        # ErrorNode must be tested before TerminalNode: it is a subclass.
        if isinstance(t, ErrorNode):
            listener.visitErrorNode(t)
        elif isinstance(t, TerminalNode):
            listener.visitTerminal(t)
        else:
            self.enterRule(listener, t)
            for subtree in t.getChildren():
                self.walk(listener, subtree)
            self.exitRule(listener, t)

    def enterRule(self, listener:ParseTreeListener, r:RuleNode):
        """
        Enters a grammar rule by first triggering the generic event {@link ParseTreeListener#enterEveryRule}
        then by triggering the event specific to the given parse tree node
        @param listener The listener responding to the trigger events
        @param r The grammar rule containing the rule context
        """
        ctx = r.getRuleContext()
        listener.enterEveryRule(ctx)
        ctx.enterRule(listener)

    def exitRule(self, listener:ParseTreeListener, r:RuleNode):
        """
        Exits a grammar rule by first triggering the event specific to the given parse tree node
        then by triggering the generic event {@link ParseTreeListener#exitEveryRule}
        @param listener The listener responding to the trigger events
        @param r The grammar rule containing the rule context
        """
        ctx = r.getRuleContext()
        ctx.exitRule(listener)
        listener.exitEveryRule(ctx)

ParseTreeWalker.DEFAULT = ParseTreeWalker()
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__init__.py ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/ParseTreeMatch.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/RuleTagToken.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/TokenTagToken.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/Tree.cpython-310.pyc ADDED
Binary file (7.37 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/antlr4/tree/__pycache__/Trees.cpython-310.pyc ADDED
Binary file (3.4 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/flash_attn-2.6.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip